Posted to commits@trafodion.apache.org by li...@apache.org on 2016/12/06 14:50:36 UTC

[02/13] incubator-trafodion git commit: jira 2227

jira 2227


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/8a49f900
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/8a49f900
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/8a49f900

Branch: refs/heads/master
Commit: 8a49f9001e07be38bdcd905c47b6d9870bd37fef
Parents: 575490c
Author: unknown <yo...@ESGYNCN.com>
Authored: Mon Nov 14 11:13:15 2016 +0800
Committer: unknown <yo...@ESGYNCN.com>
Committed: Mon Nov 14 11:13:15 2016 +0800

----------------------------------------------------------------------
 core/conn/odbc/src/odbc/Common/QSData.cpp |   17 +
 core/conn/odbc/src/odbc/Common/QSData.h   |   16 +
 core/sql/bin/SqlciErrors.txt              |   13 +
 core/sql/common/NAString.cpp              |    1 +
 core/sql/common/OperTypeEnum.h            |    2 +
 core/sql/common/json.cpp                  | 1139 ++++++++++++++++++++++++
 core/sql/common/json.h                    |  124 +++
 core/sql/common/jsonfuncs.cpp             |  757 ++++++++++++++++
 core/sql/common/stringinfo.cpp            |  258 ++++++
 core/sql/common/stringinfo.h              |  137 +++
 core/sql/exp/ExpErrorEnums.h              |   16 +
 core/sql/exp/ExpPackDefs.cpp              |    6 +
 core/sql/exp/exp_clause.cpp               |    9 +
 core/sql/exp/exp_clause.h                 |    3 +-
 core/sql/exp/exp_function.cpp             |  106 +++
 core/sql/exp/exp_function.h               |   22 +
 core/sql/generator/GenItemFunc.cpp        |    7 +
 core/sql/optimizer/BindItemExpr.cpp       |    5 +-
 core/sql/optimizer/ItemExpr.cpp           |   14 +-
 core/sql/optimizer/SynthType.cpp          |   24 +
 core/sql/parser/ParKeyWords.cpp           |    3 +-
 core/sql/parser/sqlparser.y               |    8 +-
 22 files changed, 2682 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/conn/odbc/src/odbc/Common/QSData.cpp
----------------------------------------------------------------------
diff --git a/core/conn/odbc/src/odbc/Common/QSData.cpp b/core/conn/odbc/src/odbc/Common/QSData.cpp
index d6966de..ae1e42d 100644
--- a/core/conn/odbc/src/odbc/Common/QSData.cpp
+++ b/core/conn/odbc/src/odbc/Common/QSData.cpp
@@ -1607,6 +1607,23 @@ string getExeErrorCodeString(ExeErrorCode value)
 	case CLI_RWRS_DECOMPRESS_LENGTH_ERROR: 			return FMT_CLI_RWRS_DECOMPRESS_LENGTH_ERROR;
 	case CLI_NAR_ERROR_DETAILS: 					return FMT_CLI_NAR_ERROR_DETAILS;
 // ---------------------------------------------------------------------
+// Execution errors related to json
+// ---------------------------------------------------------------------
+    case EXE_JSON_INVALID_TOKEN:                    return FMT_EXE_JSON_INVALID_TOKEN;
+    case EXE_JSON_INVALID_VALUE:                    return FMT_EXE_JSON_INVALID_VALUE;
+    case EXE_JSON_INVALID_STRING:                   return FMT_EXE_JSON_INVALID_STRING;
+    case EXE_JSON_INVALID_ARRAY_START:              return FMT_EXE_JSON_INVALID_ARRAY_START;
+    case EXE_JSON_INVALID_ARRAY_NEXT:               return FMT_EXE_JSON_INVALID_ARRAY_NEXT;
+    case EXE_JSON_INVALID_OBJECT_START:             return FMT_EXE_JSON_INVALID_OBJECT_START;
+    case EXE_JSON_INVALID_OBJECT_LABEL:             return FMT_EXE_JSON_INVALID_OBJECT_LABEL;
+    case EXE_JSON_INVALID_OBJECT_NEXT:              return FMT_EXE_JSON_INVALID_OBJECT_NEXT;
+    case EXE_JSON_INVALID_OBJECT_COMMA:             return FMT_EXE_JSON_INVALID_OBJECT_COMMA;
+    case EXE_JSON_INVALID_END:                      return FMT_EXE_JSON_INVALID_END;
+    case EXE_JSON_END_PREMATURELY:                  return FMT_EXE_JSON_END_PREMATURELY;
+    case EXE_JSON_UNEXPECTED_ERROR:                 return FMT_EXE_JSON_UNEXPECTED_ERROR;
+
+    
+// ---------------------------------------------------------------------
 //
 // ---------------------------------------------------------------------
 	default:

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/conn/odbc/src/odbc/Common/QSData.h
----------------------------------------------------------------------
diff --git a/core/conn/odbc/src/odbc/Common/QSData.h b/core/conn/odbc/src/odbc/Common/QSData.h
index ab04463..a32fdd8 100644
--- a/core/conn/odbc/src/odbc/Common/QSData.h
+++ b/core/conn/odbc/src/odbc/Common/QSData.h
@@ -3645,6 +3645,22 @@ enum ExeErrorCode
 	CLI_RWRS_DECOMPRESS_LENGTH_ERROR 		= 30046,
 	CLI_NAR_ERROR_DETAILS            		= 30047,
 // ---------------------------------------------------------------------
+// Execution errors related to JSON parser
+// ---------------------------------------------------------------------
+    EXE_JSON_INVALID_TOKEN                  = 32001,
+    EXE_JSON_INVALID_VALUE                  = 32002,
+    EXE_JSON_INVALID_STRING                 = 32003,
+    EXE_JSON_INVALID_ARRAY_START            = 32004,
+    EXE_JSON_INVALID_ARRAY_NEXT             = 32005,
+    EXE_JSON_INVALID_OBJECT_START           = 32006,
+    EXE_JSON_INVALID_OBJECT_LABEL           = 32007,
+    EXE_JSON_INVALID_OBJECT_NEXT            = 32008,
+    EXE_JSON_INVALID_OBJECT_COMMA           = 32009,
+    EXE_JSON_INVALID_END                    = 32010,
+    EXE_JSON_END_PREMATURELY                = 32011,
+    EXE_JSON_UNEXPECTED_ERROR               = 32012,
+
+// ---------------------------------------------------------------------
 // the trailer (use temporarily for new errors that aren't added yet)
 // ---------------------------------------------------------------------
 	EXE_NEW_ERROR							= 8999

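As a small usage sketch (outside the patch itself): the new codes above are consumed through the getExeErrorCodeString() helper patched in QSData.cpp earlier in this diff. The snippet assumes a declaration of getExeErrorCodeString() is visible through QSData.h and that the FMT_EXE_JSON_* strings are defined alongside the other FMT_* literals; the unqualified "string" follows the usage already shown in QSData.cpp.

    #include "QSData.h"   // assumed to declare ExeErrorCode and getExeErrorCodeString()

    // Sketch only: look up the display text for one of the new JSON codes.
    // The result is whatever FMT_EXE_JSON_INVALID_TOKEN expands to above.
    string describeJsonLexError()
    {
        return getExeErrorCodeString(EXE_JSON_INVALID_TOKEN);   // code 32001
    }
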
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/bin/SqlciErrors.txt
----------------------------------------------------------------------
diff --git a/core/sql/bin/SqlciErrors.txt b/core/sql/bin/SqlciErrors.txt
index e4dda67..f8756ff 100644
--- a/core/sql/bin/SqlciErrors.txt
+++ b/core/sql/bin/SqlciErrors.txt
@@ -3925,3 +3925,16 @@ $3~String1.
 30047 ZZZZZ 99999 BEGINNER MAJOR DBADMIN NAR details. HexdRow: $0~String0 ErrNum: $1~Int1 ObjectName: $2~TableName PartitionName: $3~String3 FileNum: $4~Int4 RecNum: $5~Int5
 30048 ZZZZZ 99999  BEGINNER MAJOR DBADMIN Fast Load failed. Number of error rows($0~Int0) inserted into the exception table exceeded the max allowed($1~Int1).
 30049 ZZZZZ 99999  BEGINNER MAJOR DBADMIN Fast Load succeeded. Number of error rows inserted into the exception table is $0~Int0
+
+32001 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Token is invalid.
+32002 ZZZZZ 99999 BEGINNER MAJOR DBADMIN JSON value is invalid.
+32003 ZZZZZ 99999 BEGINNER MAJOR DBADMIN String is invalid.
+32004 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected array element or "]", but not found.
+32005 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected "," or "]", but not found.
+32006 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected string or "}", but not found.
+32007 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected ":", but not found.
+32008 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected "," or "}", but not found.
+32009 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected string, but not found.
+32010 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Expected end of input, but not found.
+32011 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The input string ended unexpectedly.
+32012 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Unexpected JSON parse state.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/NAString.cpp
----------------------------------------------------------------------
diff --git a/core/sql/common/NAString.cpp b/core/sql/common/NAString.cpp
index 2e660ec..8f860dd 100644
--- a/core/sql/common/NAString.cpp
+++ b/core/sql/common/NAString.cpp
@@ -1153,6 +1153,7 @@ static NABoolean tokIsFuncOrParenKeyword(const NAString &sqlText,
         "GROUP_CONCAT",        // MySQL-extension
 	"HASHPARTFUNC ",       // Tandem-extension
 	"HOUR ",               // Datatype with scales/precisions/length
+	"JSON_OBJECT_FIELD_TEXT",  // JSON extension
 	"JULIANTIMESTAMP ",    // Tandem-extension
 	"LCASE ",              // Tandem-extension
 	"LOCATE ",             // Tandem-extension

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/OperTypeEnum.h
----------------------------------------------------------------------
diff --git a/core/sql/common/OperTypeEnum.h b/core/sql/common/OperTypeEnum.h
index 6d925ad..a18af5e 100644
--- a/core/sql/common/OperTypeEnum.h
+++ b/core/sql/common/OperTypeEnum.h
@@ -508,6 +508,8 @@ enum OperatorTypeEnum {
                         ITM_BITEXTRACT = 2235,
                         ITM_CONVERTTOBITS = 2236,
                         ITM_LOG2= 2237,
+                        // JSON functions
+                        ITM_JSONOBJECTFIELDTEXT = 2241,
 
                         // string functions
                         ITM_TRUNC = 2250,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/json.cpp
----------------------------------------------------------------------
diff --git a/core/sql/common/json.cpp b/core/sql/common/json.cpp
new file mode 100644
index 0000000..737bb4b
--- /dev/null
+++ b/core/sql/common/json.cpp
@@ -0,0 +1,1139 @@
+#include "stringinfo.h"
+#include "json.h"
+#include <string.h>
+#include <stdlib.h>
+#include "str.h"
+
+/*
+ * The context of the parser is maintained by the recursive descent
+ * mechanism, but is passed explicitly to the error reporting routine
+ * for better diagnostics.
+ */
+typedef enum					/* contexts of JSON parser */
+{
+    JSON_PARSE_VALUE,			/* expecting a value */
+    JSON_PARSE_STRING,			/* expecting a string (for a field name) */
+    JSON_PARSE_ARRAY_START,		/* saw '[', expecting value or ']' */
+    JSON_PARSE_ARRAY_NEXT,		/* saw array element, expecting ',' or ']' */
+    JSON_PARSE_OBJECT_START,	/* saw '{', expecting label or '}' */
+    JSON_PARSE_OBJECT_LABEL,	/* saw object label, expecting ':' */
+    JSON_PARSE_OBJECT_NEXT,		/* saw object value, expecting ',' or '}' */
+    JSON_PARSE_OBJECT_COMMA,	/* saw object ',', expecting next label */
+    JSON_PARSE_END				/* saw the end of a document, expect nothing */
+} JsonParseContext;
+
+typedef enum					/* type categories for datum_to_json */
+{
+    JSONTYPE_NULL,				/* null, so we didn't bother to identify */
+    JSONTYPE_BOOL,				/* boolean (built-in types only) */
+    JSONTYPE_NUMERIC,			/* numeric (ditto) */
+    JSONTYPE_DATE,				/* we use special formatting for datetimes */
+    JSONTYPE_TIMESTAMP,
+    JSONTYPE_TIMESTAMPTZ,
+    JSONTYPE_JSON,				/* JSON itself (and JSONB) */
+    JSONTYPE_ARRAY,				/* array */
+    JSONTYPE_COMPOSITE,			/* composite */
+    JSONTYPE_CAST,				/* something with an explicit cast to JSON */
+    JSONTYPE_OTHER				/* all else */
+} JsonTypeCategory;
+
+static inline JsonReturnType json_lex(JsonLexContext *lex);
+static inline JsonReturnType json_lex_string(JsonLexContext *lex);
+static inline JsonReturnType json_lex_number(JsonLexContext *lex, char *s,
+        bool *num_err, int *total_len);
+static inline JsonReturnType parse_scalar(JsonLexContext *lex, JsonSemAction *sem);
+static JsonReturnType parse_object_field(JsonLexContext *lex, JsonSemAction *sem);
+static JsonReturnType parse_object(JsonLexContext *lex, JsonSemAction *sem);
+static JsonReturnType parse_array_element(JsonLexContext *lex, JsonSemAction *sem);
+static JsonReturnType parse_array(JsonLexContext *lex, JsonSemAction *sem);
+static JsonReturnType report_parse_error(JsonParseContext ctx, JsonLexContext *lex);
+
+
+/* the null action object used for pure validation */
+static JsonSemAction nullSemAction =
+{
+    NULL, NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL, NULL
+};
+
+/* Recursive Descent parser support routines */
+
+/*
+ * lex_peek
+ *
+ * what is the current look_ahead token?
+*/
+static inline JsonTokenType
+lex_peek(JsonLexContext *lex)
+{
+    return lex->token_type;
+}
+
+/*
+ * lex_accept
+ *
+ * accept the look_ahead token and move the lexer to the next token if the
+ * look_ahead token matches the token parameter. In that case, and if required,
+ * also hand back the de-escaped lexeme.
+ *
+ * returns true if the token matched, false otherwise.
+ */
+static inline JsonReturnType
+lex_accept(JsonLexContext *lex, JsonTokenType token, char **lexeme, bool &ismatched)
+{
+    JsonReturnType ret = JSON_OK;
+    ismatched = false;
+
+    if (lex->token_type == token)
+    {
+        if (lexeme != NULL)
+        {
+            if (lex->token_type == JSON_TOKEN_STRING)
+            {
+                if (lex->strval != NULL)
+                    *lexeme = strdup(lex->strval->data);
+            }
+            else
+            {
+                int			len = (lex->token_terminator - lex->token_start);
+                char	   *tokstr = (char *)malloc(len + 1);
+
+                memcpy(tokstr, lex->token_start, len);
+                tokstr[len] = '\0';
+                *lexeme = tokstr;
+            }
+        }
+        ret = json_lex(lex);
+        if (ret == JSON_OK)
+            ismatched = true;
+    }
+    return ret;
+}
+
+/*
+ * lex_expect
+ *
+ * move the lexer to the next token if the current look_ahead token matches
+ * the parameter token. Otherwise, report an error.
+ */
+static inline JsonReturnType
+lex_expect(JsonParseContext ctx, JsonLexContext *lex, JsonTokenType token)
+{
+    bool ismatched = false;
+    JsonReturnType ret = lex_accept(lex, token, NULL, ismatched);
+    if (ret != JSON_OK)
+        return ret;
+    else if (!ismatched)
+        return JSON_INVALID_TOKEN;
+    else
+        return JSON_OK;
+}
+
+
+/* chars to consider as part of an alphanumeric token */
+#define JSON_ALPHANUMERIC_CHAR(c)  \
+	(((c) >= 'a' && (c) <= 'z') || \
+	 ((c) >= 'A' && (c) <= 'Z') || \
+	 ((c) >= '0' && (c) <= '9') || \
+	 (c) == '_' || \
+	 IS_HIGHBIT_SET(c))
+
+/*
+ * Utility function to check if a string is a valid JSON number.
+ *
+ * str is of length len, and need not be null-terminated.
+ */
+bool
+IsValidJsonNumber(const char *str, int len)
+{
+    bool		numeric_error;
+    int			total_len;
+    JsonLexContext dummy_lex;
+    int ret = 0;
+
+    if (len <= 0)
+        return false;
+
+    /*
+     * json_lex_number expects a leading  '-' to have been eaten already.
+     *
+     * having to cast away the constness of str is ugly, but there's not much
+     * easy alternative.
+     */
+    if (*str == '-')
+    {
+        dummy_lex.input = (char *) str + 1;
+        dummy_lex.input_length = len - 1;
+    }
+    else
+    {
+        dummy_lex.input = (char *) str;
+        dummy_lex.input_length = len;
+    }
+
+    ret = json_lex_number(&dummy_lex, dummy_lex.input, &numeric_error, &total_len);
+    if (ret != JSON_OK)
+        return false;
+
+    return (!numeric_error) && (total_len == dummy_lex.input_length);
+}
+
+/*
+ * makeJsonLexContext
+ *
+ * lex constructor, with or without StringInfo object
+ * for de-escaped lexemes.
+ *
+ * Going without one is faster, so only ask for de-escaped lexemes if they
+ * are really required.
+ *
+ * If the json is already a null-terminated string, use the first of these
+ * functions; otherwise use makeJsonLexContextCstringLen().
+ */
+JsonLexContext *
+makeJsonLexContext(char *json, bool need_escapes)
+{
+    return makeJsonLexContextCstringLen(json, str_len(json), need_escapes);
+}
+
+JsonLexContext *
+makeJsonLexContextCstringLen(char *json, int len, bool need_escapes)
+{
+    JsonLexContext *lex = (JsonLexContext *)malloc(sizeof(JsonLexContext));
+    memset(lex, 0, sizeof(JsonLexContext));
+    lex->input = lex->token_terminator = lex->line_start = json;
+    lex->line_number = 1;
+    lex->input_length = len;
+
+    if (need_escapes)
+        lex->strval = makeStringInfo();
+    return lex;
+}
+
+/*
+ * pg_parse_json
+ *
+ * Publicly visible entry point for the JSON parser.
+ *
+ * lex is a lexing context, set up for the json to be processed by calling
+ * makeJsonLexContext(). sem is a structure of function pointers to semantic
+ * action routines to be called at appropriate spots during parsing, and a
+ * pointer to a state object to be passed to those routines.
+ */
+JsonReturnType
+pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
+{
+    JsonTokenType tok;
+    JsonReturnType ret = JSON_OK;
+
+    /* get the initial token */
+    json_lex(lex);
+
+    tok = lex_peek(lex);
+
+    /* parse by recursive descent */
+    switch (tok)
+    {
+    case JSON_TOKEN_OBJECT_START:
+        ret = parse_object(lex, sem);
+        break;
+    case JSON_TOKEN_ARRAY_START:
+        ret = parse_array(lex, sem);
+        break;
+    default:
+        ret = parse_scalar(lex, sem);		/* json can be a bare scalar */
+    }
+    if (ret != JSON_OK)
+        return ret;
+    return lex_expect(JSON_PARSE_END, lex, JSON_TOKEN_END);
+
+}
+
+/*
+ * json_count_array_elements
+ *
+ * Returns number of array elements in lex context at start of array token
+ * until end of array token at same nesting level.
+ *
+ * Designed to be called from array_start routines.
+ */
+JsonReturnType
+json_count_array_elements(JsonLexContext *lex, int &count)
+{
+    JsonLexContext copylex;
+    JsonReturnType ret;
+    /*
+     * It's safe to do this with a shallow copy because the lexical routines
+     * don't scribble on the input. They do scribble on the other pointers
+     * etc, so doing this with a copy makes that safe.
+     */
+    memcpy(&copylex, lex, sizeof(JsonLexContext));
+    copylex.strval = NULL;		/* not interested in values here */
+    copylex.lex_level++;
+
+    count = 0;
+    ret = lex_expect(JSON_PARSE_ARRAY_START, &copylex, JSON_TOKEN_ARRAY_START);
+    if (ret != 0 )
+        return ret;
+    if (lex_peek(&copylex) != JSON_TOKEN_ARRAY_END)
+    {
+        bool ismatched;
+        do
+        {
+            count++;
+            ret = parse_array_element(&copylex, &nullSemAction);
+            if (ret != JSON_OK )
+                return ret;
+            ret = lex_accept(&copylex, JSON_TOKEN_COMMA, NULL, ismatched);
+        }
+        while (ret == JSON_OK && ismatched);
+        if (ret != JSON_OK)
+            return ret;
+    }
+    ret = lex_expect(JSON_PARSE_ARRAY_NEXT, &copylex, JSON_TOKEN_ARRAY_END);
+    if (ret != JSON_OK)
+        return ret;
+    return JSON_OK;
+}
+
+/*
+ *	Recursive Descent parse routines. There is one for each structural
+ *	element in a json document:
+ *	  - scalar (string, number, true, false, null)
+ *	  - array  ( [ ] )
+ *	  - array element
+ *	  - object ( { } )
+ *	  - object field
+ */
+static inline JsonReturnType
+parse_scalar(JsonLexContext *lex, JsonSemAction *sem)
+{
+    char	   *val = NULL;
+    json_scalar_action sfunc = sem->scalar;
+    char	  **valaddr;
+    JsonReturnType ret = JSON_OK;
+    bool ismatched;
+    JsonTokenType tok = lex_peek(lex);
+
+    valaddr = sfunc == NULL ? NULL : &val;
+
+    /* a scalar must be a string, a number, true, false, or null */
+    switch (tok)
+    {
+    case JSON_TOKEN_TRUE:
+        ret = lex_accept(lex, JSON_TOKEN_TRUE, valaddr, ismatched);
+        break;
+    case JSON_TOKEN_FALSE:
+        ret = lex_accept(lex, JSON_TOKEN_FALSE, valaddr, ismatched);
+        break;
+    case JSON_TOKEN_NULL:
+        ret = lex_accept(lex, JSON_TOKEN_NULL, valaddr, ismatched);
+        break;
+    case JSON_TOKEN_NUMBER:
+        ret = lex_accept(lex, JSON_TOKEN_NUMBER, valaddr, ismatched);
+        break;
+    case JSON_TOKEN_STRING:
+        ret = lex_accept(lex, JSON_TOKEN_STRING, valaddr, ismatched);
+        break;
+    default:
+        return report_parse_error(JSON_PARSE_VALUE, lex);
+    }
+    if (ret != JSON_OK)
+    {
+        if (valaddr != NULL && *valaddr != NULL)
+        {
+            free(*valaddr);
+            *valaddr = NULL;
+        }
+        return ret;
+    }
+
+    if (sfunc != NULL)
+        ret = (*sfunc) (sem->semstate, val, tok);
+    if (valaddr != NULL && *valaddr != NULL)
+    {
+        free(*valaddr);
+        *valaddr = NULL;
+    }
+    return ret;
+}
+
+static JsonReturnType
+parse_object_field(JsonLexContext *lex, JsonSemAction *sem)
+{
+    JsonReturnType ret = JSON_OK;
+    /*
+     * An object field is "fieldname" : value where value can be a scalar,
+     * object or array.  Note: in user-facing docs and error messages, we
+     * generally call a field name a "key".
+     */
+
+    char	   *fname = NULL;	/* keep compiler quiet */
+    json_ofield_action ostart = sem->object_field_start;
+    json_ofield_action oend = sem->object_field_end;
+    bool		isnull;
+    bool ismatched;
+    char	  **fnameaddr = NULL;
+    JsonTokenType tok;
+
+    if (ostart != NULL || oend != NULL)
+        fnameaddr = &fname;
+
+    ret = lex_accept(lex, JSON_TOKEN_STRING, fnameaddr, ismatched);
+    if (ret != JSON_OK || !ismatched)
+        ret = JSON_INVALID_TOKEN;
+    if (ret != JSON_OK)
+    {
+        if (fnameaddr != NULL && *fnameaddr != NULL)
+            free(*fnameaddr);
+        return ret;
+    }
+
+    ret = lex_expect(JSON_PARSE_OBJECT_LABEL, lex, JSON_TOKEN_COLON);
+    if (ret != JSON_OK )
+    {
+        if (fnameaddr != NULL && *fnameaddr != NULL)
+            free(*fnameaddr);
+        return ret;
+    }
+    tok = lex_peek(lex);
+    isnull = tok == JSON_TOKEN_NULL;
+
+    if (ostart != NULL)
+    {
+        ret = (*ostart) (sem->semstate, fname, isnull);
+        if (ret != JSON_OK)
+        {
+            if (fnameaddr != NULL && *fnameaddr != NULL)
+                free(*fnameaddr);
+            return ret;
+        }
+    }
+
+    switch (tok)
+    {
+    case JSON_TOKEN_OBJECT_START:
+        ret = parse_object(lex, sem);
+        break;
+    case JSON_TOKEN_ARRAY_START:
+        ret = parse_array(lex, sem);
+        break;
+    default:
+        ret = parse_scalar(lex, sem);
+    }
+    if (ret != JSON_OK)
+    {
+        if (fnameaddr != NULL && *fnameaddr != NULL)
+            free(*fnameaddr);
+        return ret;
+    }
+
+    if (oend != NULL)
+        ret = (*oend) (sem->semstate, fname, isnull);
+
+    if (fnameaddr != NULL && *fnameaddr != NULL)
+        free(*fnameaddr);
+    return ret;
+}
+
+static JsonReturnType
+parse_object(JsonLexContext *lex, JsonSemAction *sem)
+{
+    JsonReturnType ret = JSON_OK;
+    bool ismatched;
+    /*
+     * an object is a possibly empty sequence of object fields, separated by
+     * commas and surrounded by curly braces.
+     */
+    json_struct_action ostart = sem->object_start;
+    json_struct_action oend = sem->object_end;
+    JsonTokenType tok;
+
+    if (ostart != NULL)
+    {
+        ret = (*ostart) (sem->semstate);
+        if (ret != JSON_OK)
+            return ret;
+    }
+
+    /*
+     * Data inside an object is at a higher nesting level than the object
+     * itself. Note that we increment this after we call the semantic routine
+     * for the object start and restore it before we call the routine for the
+     * object end.
+     */
+    lex->lex_level++;
+
+    /* we know this will succeed, just clearing the token */
+    ret = lex_expect(JSON_PARSE_OBJECT_START, lex, JSON_TOKEN_OBJECT_START);
+    if (ret != JSON_OK )
+        return ret;
+    tok = lex_peek(lex);
+    switch (tok)
+    {
+    case JSON_TOKEN_STRING:
+        ret = parse_object_field(lex, sem);
+        if (ret != JSON_OK)
+            return ret;
+        ret = lex_accept(lex, JSON_TOKEN_COMMA, NULL, ismatched);
+        while (ret == JSON_OK && ismatched)
+        {
+            ret = parse_object_field(lex, sem);
+            if (ret != JSON_OK)
+                return ret;
+            ret = lex_accept(lex, JSON_TOKEN_COMMA, NULL, ismatched);
+        }
+
+        if (ret != JSON_OK)
+            return ret;
+        break;
+    case JSON_TOKEN_OBJECT_END:
+        break;
+    default:
+        /* case of an invalid initial token inside the object */
+        return report_parse_error(JSON_PARSE_OBJECT_START, lex);
+    }
+
+    ret = lex_expect(JSON_PARSE_OBJECT_NEXT, lex, JSON_TOKEN_OBJECT_END);
+    if (ret != JSON_OK )
+        return ret;
+    lex->lex_level--;
+
+    if (oend != NULL)
+        ret = (*oend) (sem->semstate);
+    return ret;
+}
+
+static JsonReturnType
+parse_array_element(JsonLexContext *lex, JsonSemAction *sem)
+{
+    json_aelem_action astart = sem->array_element_start;
+    json_aelem_action aend = sem->array_element_end;
+    JsonTokenType tok = lex_peek(lex);
+    JsonReturnType ret = JSON_OK;
+    bool		isnull;
+
+    isnull = tok == JSON_TOKEN_NULL;
+
+    if (astart != NULL)
+    {
+        ret = (*astart) (sem->semstate, isnull);
+        if (ret != JSON_OK)
+            return ret;
+    }
+
+    /* an array element is any object, array or scalar */
+    switch (tok)
+    {
+    case JSON_TOKEN_OBJECT_START:
+        ret = parse_object(lex, sem);
+        break;
+    case JSON_TOKEN_ARRAY_START:
+        ret = parse_array(lex, sem);
+        break;
+    default:
+        ret = parse_scalar(lex, sem);
+    }
+    if (ret != JSON_OK)
+        return ret;
+
+    if (aend != NULL)
+        ret = (*aend) (sem->semstate, isnull);
+
+    return ret;
+}
+
+static JsonReturnType
+parse_array(JsonLexContext *lex, JsonSemAction *sem)
+{
+    JsonReturnType ret = JSON_OK;
+    bool ismatched;
+    /*
+     * an array is a possibly empty sequence of array elements, separated by
+     * commas and surrounded by square brackets.
+     */
+    json_struct_action astart = sem->array_start;
+    json_struct_action aend = sem->array_end;
+
+    if (astart != NULL)
+    {
+        ret = (*astart) (sem->semstate);
+        if (ret != JSON_OK)
+            return ret;
+    }
+
+    /*
+     * Data inside an array is at a higher nesting level than the array
+     * itself. Note that we increment this after we call the semantic routine
+     * for the array start and restore it before we call the routine for the
+     * array end.
+     */
+    lex->lex_level++;
+
+    ret = lex_expect(JSON_PARSE_ARRAY_START, lex, JSON_TOKEN_ARRAY_START);
+    if (ret != JSON_OK)
+        return ret;
+    if (lex_peek(lex) != JSON_TOKEN_ARRAY_END)
+    {
+        ret = parse_array_element(lex, sem);
+        if (ret != JSON_OK)
+            return ret;
+        ret = lex_accept(lex, JSON_TOKEN_COMMA, NULL, ismatched);
+        while (ret == JSON_OK && ismatched)
+        {
+            ret = parse_array_element(lex, sem);
+            if (ret != JSON_OK)
+                return ret;
+            ret = lex_accept(lex, JSON_TOKEN_COMMA, NULL, ismatched);
+        }
+    }
+    if (ret != JSON_OK)
+        return ret;
+
+    ret = lex_expect(JSON_PARSE_ARRAY_NEXT, lex, JSON_TOKEN_ARRAY_END);
+    if (ret != JSON_OK)
+        return ret;
+    lex->lex_level--;
+
+    if (aend != NULL)
+    {
+        ret = (*aend) (sem->semstate);
+        if (ret != JSON_OK)
+            return ret;
+    }
+    return JSON_OK;
+}
+
+/*
+ * Lex one token from the input stream.
+ */
+static inline JsonReturnType
+json_lex(JsonLexContext *lex)
+{
+    char	   *s;
+    int			len;
+    JsonReturnType ret = JSON_OK;
+
+    /* Skip leading whitespace. */
+    s = lex->token_terminator;
+    len = s - lex->input;
+    while (len < lex->input_length &&
+            (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r'))
+    {
+        if (*s == '\n')
+            ++lex->line_number;
+        ++s;
+        ++len;
+    }
+    lex->token_start = s;
+
+    /* Determine token type. */
+    if (len >= lex->input_length)
+    {
+        lex->token_start = NULL;
+        lex->prev_token_terminator = lex->token_terminator;
+        lex->token_terminator = s;
+        lex->token_type = JSON_TOKEN_END;
+    }
+    else
+        switch (*s)
+        {
+        /* Single-character token, some kind of punctuation mark. */
+        case '{':
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = s + 1;
+            lex->token_type = JSON_TOKEN_OBJECT_START;
+            break;
+        case '}':
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = s + 1;
+            lex->token_type = JSON_TOKEN_OBJECT_END;
+            break;
+        case '[':
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = s + 1;
+            lex->token_type = JSON_TOKEN_ARRAY_START;
+            break;
+        case ']':
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = s + 1;
+            lex->token_type = JSON_TOKEN_ARRAY_END;
+            break;
+        case ',':
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = s + 1;
+            lex->token_type = JSON_TOKEN_COMMA;
+            break;
+        case ':':
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = s + 1;
+            lex->token_type = JSON_TOKEN_COLON;
+            break;
+        case '"':
+            /* string */
+            ret = json_lex_string(lex);
+            lex->token_type = JSON_TOKEN_STRING;
+            break;
+        case '-':
+            /* Negative number. */
+            ret = json_lex_number(lex, s + 1, NULL, NULL);
+            lex->token_type = JSON_TOKEN_NUMBER;
+            break;
+        case '0':
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9':
+            /* Positive number. */
+            ret = json_lex_number(lex, s, NULL, NULL);
+            lex->token_type = JSON_TOKEN_NUMBER;
+            break;
+        default:
+        {
+            char	   *p;
+
+            /*
+             * We're not dealing with a string, number, legal
+             * punctuation mark, or end of string.  The only legal
+             * tokens we might find here are true, false, and null,
+             * but for error reporting purposes we scan until we see a
+             * non-alphanumeric character.  That way, we can report
+             * the whole word as an unexpected token, rather than just
+             * some unintuitive prefix thereof.
+             */
+            for (p = s; p - s < lex->input_length - len && JSON_ALPHANUMERIC_CHAR(*p); p++)
+                /* skip */ ;
+
+            /*
+             * We got some sort of unexpected punctuation or an
+             * otherwise unexpected character, so just complain about
+             * that one character.
+             */
+            if (p == s)
+            {
+                lex->prev_token_terminator = lex->token_terminator;
+                lex->token_terminator = s + 1;
+                return JSON_INVALID_TOKEN;
+            }
+
+            /*
+             * We've got a real alphanumeric token here.  If it
+             * happens to be true, false, or null, all is well.  If
+             * not, error out.
+             */
+            lex->prev_token_terminator = lex->token_terminator;
+            lex->token_terminator = p;
+            if (p - s == 4)
+            {
+                if (memcmp(s, "true", 4) == 0)
+                    lex->token_type = JSON_TOKEN_TRUE;
+                else if (memcmp(s, "null", 4) == 0)
+                    lex->token_type = JSON_TOKEN_NULL;
+                else
+                    return JSON_INVALID_TOKEN;
+            }
+            else if (p - s == 5 && memcmp(s, "false", 5) == 0)
+                lex->token_type = JSON_TOKEN_FALSE;
+            else
+                return JSON_INVALID_TOKEN;
+
+        }
+        }						/* end of switch */
+    return ret;
+}
+
+/*
+ * The next token in the input stream is known to be a string; lex it.
+ */
+static inline JsonReturnType
+json_lex_string(JsonLexContext *lex)
+{
+    char	   *s;
+    int			len;
+    int			hi_surrogate = -1;
+
+    if (lex->strval != NULL)
+        resetStringInfo(lex->strval);
+
+    //ASSERT(lex->input_length > 0);
+    s = lex->token_start;
+    len = lex->token_start - lex->input;
+    for (;;)
+    {
+        s++;
+        len++;
+        /* Premature end of the string. */
+        if (len >= lex->input_length)
+        {
+            lex->token_terminator = s;
+            return JSON_INVALID_TOKEN;
+        }
+        else if (*s == '"')
+            break;
+        else if ((unsigned char) *s < 32)
+        {
+            /* Per RFC4627, these characters MUST be escaped. */
+            /* Since *s isn't printable, exclude it from the context string */
+            return JSON_INVALID_STRING;
+        }
+        else if (*s == '\\')
+        {
+            /* OK, we have an escape character. */
+            s++;
+            len++;
+            if (len >= lex->input_length)
+            {
+                lex->token_terminator = s;
+                return JSON_INVALID_TOKEN;
+            }
+            else if (*s == 'u')
+            {
+                int			i;
+                int			ch = 0;
+
+                for (i = 1; i <= 4; i++)
+                {
+                    s++;
+                    len++;
+                    if (len >= lex->input_length)
+                    {
+                        lex->token_terminator = s;
+                        return JSON_INVALID_TOKEN;
+                    }
+                    else if (*s >= '0' && *s <= '9')
+                        ch = (ch * 16) + (*s - '0');
+                    else if (*s >= 'a' && *s <= 'f')
+                        ch = (ch * 16) + (*s - 'a') + 10;
+                    else if (*s >= 'A' && *s <= 'F')
+                        ch = (ch * 16) + (*s - 'A') + 10;
+                    else
+                        return JSON_INVALID_STRING;
+                }
+                if (lex->strval != NULL)
+                {
+                    char		utf8str[5];
+                    int			utf8len;
+
+                    if (ch >= 0xd800 && ch <= 0xdbff)
+                    {
+                        if (hi_surrogate != -1)
+                            return JSON_INVALID_STRING;
+                        hi_surrogate = (ch & 0x3ff) << 10;
+                        continue;
+                    }
+                    else if (ch >= 0xdc00 && ch <= 0xdfff)
+                    {
+                        if (hi_surrogate == -1)
+                            return JSON_INVALID_STRING;
+                        ch = 0x10000 + hi_surrogate + (ch & 0x3ff);
+                        hi_surrogate = -1;
+                    }
+
+                    if (hi_surrogate != -1)
+                        return JSON_INVALID_STRING;
+
+                    /*
+                     * For UTF8, replace the escape sequence by the actual
+                     * utf8 character in lex->strval. Do this also for other
+                     * encodings if the escape designates an ASCII character,
+                     * otherwise raise an error.
+                     */
+
+                    if (ch == 0)
+                    {
+                        /* We can't allow this, since our TEXT type doesn't */
+                        return JSON_INVALID_STRING;
+                    }
+                    else if (ch <= 0x007f)
+                    {
+                        /*
+                         * This is the only way to designate things like a
+                         * form feed character in JSON, so it's useful in all
+                         * encodings.
+                         */
+                        appendStringInfoChar(lex->strval, (char) ch);
+                    }
+                    else
+                        return JSON_INVALID_STRING;
+
+                }
+            }
+            else if (lex->strval != NULL)
+            {
+                if (hi_surrogate != -1)
+                    return JSON_INVALID_STRING;
+
+                switch (*s)
+                {
+                case '"':
+                case '\\':
+                case '/':
+                    appendStringInfoChar(lex->strval, *s);
+                    break;
+                case 'b':
+                    appendStringInfoChar(lex->strval, '\b');
+                    break;
+                case 'f':
+                    appendStringInfoChar(lex->strval, '\f');
+                    break;
+                case 'n':
+                    appendStringInfoChar(lex->strval, '\n');
+                    break;
+                case 'r':
+                    appendStringInfoChar(lex->strval, '\r');
+                    break;
+                case 't':
+                    appendStringInfoChar(lex->strval, '\t');
+                    break;
+                default:
+                    /* Not a valid string escape, so error out. */
+                    return JSON_INVALID_STRING;
+                }
+            }
+            else if (strchr("\"\\/bfnrt", *s) == NULL)
+            {
+                return JSON_INVALID_STRING;
+            }
+
+        }
+        else if (lex->strval != NULL)
+        {
+            if (hi_surrogate != -1)
+                return JSON_INVALID_STRING;
+
+            appendStringInfoChar(lex->strval, *s);
+        }
+
+    }
+
+    if (hi_surrogate != -1)
+        return JSON_INVALID_STRING;
+
+    /* Hooray, we found the end of the string! */
+    lex->prev_token_terminator = lex->token_terminator;
+    lex->token_terminator = s + 1;
+    return JSON_OK;
+}
+
+/*
+ * The next token in the input stream is known to be a number; lex it.
+ *
+ * In JSON, a number consists of four parts:
+ *
+ * (1) An optional minus sign ('-').
+ *
+ * (2) Either a single '0', or a string of one or more digits that does not
+ *	   begin with a '0'.
+ *
+ * (3) An optional decimal part, consisting of a period ('.') followed by
+ *	   one or more digits.  (Note: While this part can be omitted
+ *	   completely, it's not OK to have only the decimal point without
+ *	   any digits afterwards.)
+ *
+ * (4) An optional exponent part, consisting of 'e' or 'E', optionally
+ *	   followed by '+' or '-', followed by one or more digits.  (Note:
+ *	   As with the decimal part, if 'e' or 'E' is present, it must be
+ *	   followed by at least one digit.)
+ *
+ * The 's' argument to this function points to the ostensible beginning
+ * of part 2 - i.e. the character after any optional minus sign, or the
+ * first character of the string if there is none.
+ *
+ * If num_err is not NULL, we return an error flag to *num_err rather than
+ * raising an error for a badly-formed number.  Also, if total_len is not NULL
+ * the distance from lex->input to the token end+1 is returned to *total_len.
+ */
+static inline JsonReturnType
+json_lex_number(JsonLexContext *lex, char *s,
+                bool *num_err, int *total_len)
+{
+    bool		error = false;
+    int			len = s - lex->input;
+
+    /* Part (1): leading sign indicator. */
+    /* Caller already did this for us; so do nothing. */
+
+    /* Part (2): parse main digit string. */
+    if (len < lex->input_length && *s == '0')
+    {
+        s++;
+        len++;
+    }
+    else if (len < lex->input_length && *s >= '1' && *s <= '9')
+    {
+        do
+        {
+            s++;
+            len++;
+        }
+        while (len < lex->input_length && *s >= '0' && *s <= '9');
+    }
+    else
+        error = true;
+
+    /* Part (3): parse optional decimal portion. */
+    if (len < lex->input_length && *s == '.')
+    {
+        s++;
+        len++;
+        if (len == lex->input_length || *s < '0' || *s > '9')
+            error = true;
+        else
+        {
+            do
+            {
+                s++;
+                len++;
+            }
+            while (len < lex->input_length && *s >= '0' && *s <= '9');
+        }
+    }
+
+    /* Part (4): parse optional exponent. */
+    if (len < lex->input_length && (*s == 'e' || *s == 'E'))
+    {
+        s++;
+        len++;
+        if (len < lex->input_length && (*s == '+' || *s == '-'))
+        {
+            s++;
+            len++;
+        }
+        if (len == lex->input_length || *s < '0' || *s > '9')
+            error = true;
+        else
+        {
+            do
+            {
+                s++;
+                len++;
+            }
+            while (len < lex->input_length && *s >= '0' && *s <= '9');
+        }
+    }
+
+    /*
+     * Check for trailing garbage.  As in json_lex(), any alphanumeric stuff
+     * here should be considered part of the token for error-reporting
+     * purposes.
+     */
+    for (; len < lex->input_length && JSON_ALPHANUMERIC_CHAR(*s); s++, len++)
+        error = true;
+
+    if (total_len != NULL)
+        *total_len = len;
+
+    if (num_err != NULL)
+    {
+        /* let the caller handle any error */
+        *num_err = error;
+    }
+    else
+    {
+        /* return token endpoint */
+        lex->prev_token_terminator = lex->token_terminator;
+        lex->token_terminator = s;
+        /* handle error if any */
+        if (error)
+            return JSON_INVALID_TOKEN;
+    }
+    return JSON_OK;
+}
+
+/*
+ * Produce a JSON string literal, properly escaping characters in the text.
+ */
+void
+escape_json(StringInfo buf, const char *str)
+{
+    const char *p;
+
+    appendStringInfoCharMacro(buf, '\"');
+    for (p = str; *p; p++)
+    {
+        switch (*p)
+        {
+        case '\b':
+            appendStringInfoString(buf, "\\b");
+            break;
+        case '\f':
+            appendStringInfoString(buf, "\\f");
+            break;
+        case '\n':
+            appendStringInfoString(buf, "\\n");
+            break;
+        case '\r':
+            appendStringInfoString(buf, "\\r");
+            break;
+        case '\t':
+            appendStringInfoString(buf, "\\t");
+            break;
+        case '"':
+            appendStringInfoString(buf, "\\\"");
+            break;
+        case '\\':
+            appendStringInfoString(buf, "\\\\");
+            break;
+        default:
+            if ((unsigned char) *p < ' ')
+            	appendStringInfo(buf, "\\u%04x", (int) *p);
+            else
+            	appendStringInfoCharMacro(buf, *p);
+            break;
+        }
+    }
+    appendStringInfoCharMacro(buf, '\"');
+}
+
+
+/*
+ * Report a parse error.
+ *
+ * lex->token_start and lex->token_terminator must identify the current token.
+ */
+static JsonReturnType
+report_parse_error(JsonParseContext ctx, JsonLexContext *lex)
+{
+    /* Handle case where the input ended prematurely. */
+    if (lex->token_start == NULL || lex->token_type == JSON_TOKEN_END)
+        return JSON_END_PREMATURELY;
+
+    switch (ctx)
+    {
+    case JSON_PARSE_VALUE:
+        return JSON_INVALID_VALUE;
+        break;
+    case JSON_PARSE_STRING:
+        return JSON_INVALID_STRING;
+        break;
+    case JSON_PARSE_ARRAY_START:
+        return JSON_INVALID_ARRAY_START;
+        break;
+    case JSON_PARSE_ARRAY_NEXT:
+        return JSON_INVALID_ARRAY_NEXT;
+        break;
+    case JSON_PARSE_OBJECT_START:
+        return JSON_INVALID_OBJECT_START;
+        break;
+    case JSON_PARSE_OBJECT_LABEL:
+        return JSON_INVALID_OBJECT_LABEL;
+        break;
+    case JSON_PARSE_OBJECT_NEXT:
+        return JSON_INVALID_OBJECT_NEXT;
+        break;
+    case JSON_PARSE_OBJECT_COMMA:
+        return JSON_INVALID_OBJECT_COMMA;
+        break;
+    case JSON_PARSE_END:
+        return JSON_INVALID_END;
+        break;
+    default:
+        return JSON_UNEXPECTED_ERROR;
+    }
+}
+

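As a usage sketch (not part of the patch): validating a JSON string with the parser above, using a zeroed-out JsonSemAction in the same spirit as the file-local nullSemAction. The helper name validateJson and the sample inputs are invented for the example; everything else is the API declared in json.h below.

    #include <stdlib.h>
    #include <string.h>
    #include "json.h"

    // Sketch: a zeroed JsonSemAction has no callbacks, so pg_parse_json()
    // does a pure well-formedness check and reports the first problem found.
    static JsonReturnType validateJson(char *doc)
    {
        JsonLexContext *lex = makeJsonLexContext(doc, false /* no de-escaping needed */);
        JsonSemAction sem;
        memset(&sem, 0, sizeof(sem));

        JsonReturnType ret = pg_parse_json(lex, &sem);

        free(lex);      // lex->strval stays NULL when need_escapes is false
        return ret;     // JSON_OK for well-formed input, an error code otherwise
    }

    // IsValidJsonNumber() (defined above) can also be used on its own, e.g.
    //   IsValidJsonNumber("-12.5e3", 7)  -> true
    //   IsValidJsonNumber("01", 2)       -> false (leading zero is not allowed)
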
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/json.h
----------------------------------------------------------------------
diff --git a/core/sql/common/json.h b/core/sql/common/json.h
new file mode 100644
index 0000000..40bd5da
--- /dev/null
+++ b/core/sql/common/json.h
@@ -0,0 +1,124 @@
+#ifndef JSON_H
+#define JSON_H
+
+#include "stringinfo.h"
+
+#ifndef NULL
+#define NULL	((void *) 0)
+#endif
+
+#ifndef true
+#define true	((bool) 1)
+#endif
+
+#ifndef false
+#define false	((bool) 0)
+#endif
+
+#define HIGHBIT					(0x80)
+#define IS_HIGHBIT_SET(ch)		((unsigned char)(ch) & HIGHBIT)
+
+typedef enum
+{
+    JSON_TOKEN_INVALID,
+    JSON_TOKEN_STRING,
+    JSON_TOKEN_NUMBER,
+    JSON_TOKEN_OBJECT_START,
+    JSON_TOKEN_OBJECT_END,
+    JSON_TOKEN_ARRAY_START,
+    JSON_TOKEN_ARRAY_END,
+    JSON_TOKEN_COMMA,
+    JSON_TOKEN_COLON,
+    JSON_TOKEN_TRUE,
+    JSON_TOKEN_FALSE,
+    JSON_TOKEN_NULL,
+    JSON_TOKEN_END
+} JsonTokenType;
+
+typedef enum					/* contexts of JSON parser */
+{
+    JSON_OK = 0,
+    JSON_INVALID_TOKEN,
+    JSON_INVALID_VALUE,			/* expecting a value */
+    JSON_INVALID_STRING,			/* expecting a string (for a field name) */
+    JSON_INVALID_ARRAY_START,		/* saw '[', expecting value or ']' */
+    JSON_INVALID_ARRAY_NEXT,		/* saw array element, expecting ',' or ']' */
+    JSON_INVALID_OBJECT_START,	/* saw '{', expecting label or '}' */
+    JSON_INVALID_OBJECT_LABEL,	/* saw object label, expecting ':' */
+    JSON_INVALID_OBJECT_NEXT,		/* saw object value, expecting ',' or '}' */
+    JSON_INVALID_OBJECT_COMMA,	/* saw object ',', expecting next label */
+    JSON_INVALID_END,				/* saw the end of a document, expect nothing */
+    JSON_END_PREMATURELY,       /*the input ended prematurely*/
+    JSON_UNEXPECTED_ERROR
+} JsonReturnType;
+
+/*
+ * All the fields in this structure should be treated as read-only.
+ *
+ * If strval is not null, then it should contain the de-escaped value
+ * of the lexeme if it's a string. Otherwise most of these field names
+ * should be self-explanatory.
+ *
+ * line_number and line_start are principally for use by the parser's
+ * error reporting routines.
+ * token_terminator and prev_token_terminator point to the character
+ * AFTER the end of the token, i.e. where there would be a nul byte
+ * if we were using nul-terminated strings.
+ */
+typedef struct JsonLexContext
+{
+    char	   *input;
+    int			input_length;
+    char	   *token_start;
+    char	   *token_terminator;
+    char	   *prev_token_terminator;
+    JsonTokenType token_type;
+    int			lex_level;
+    int			line_number;
+    char	   *line_start;
+    StringInfo	strval;
+} JsonLexContext;
+
+typedef JsonReturnType (*json_struct_action) (void *state);
+typedef JsonReturnType (*json_ofield_action) (void *state, char *fname, bool isnull);
+typedef JsonReturnType (*json_aelem_action) (void *state, bool isnull);
+typedef JsonReturnType (*json_scalar_action) (void *state, char *token, JsonTokenType tokentype);
+
+/*
+ * Semantic Action structure for use in parsing json.
+ * Any of these actions can be NULL, in which case nothing is done at that
+ * point. Likewise, semstate can be NULL. Using an all-NULL structure amounts
+ * to doing a pure parse with no side-effects, and is therefore exactly
+ * what the json input routines do.
+ *
+ * The 'fname' and 'token' strings passed to these actions are malloc'd and
+ * are freed by the parser once the action returns, so the action function
+ * must copy them if it needs to keep them.
+ */
+typedef struct JsonSemAction
+{
+    void	   *semstate;
+    json_struct_action object_start;
+    json_struct_action object_end;
+    json_struct_action array_start;
+    json_struct_action array_end;
+    json_ofield_action object_field_start;
+    json_ofield_action object_field_end;
+    json_aelem_action array_element_start;
+    json_aelem_action array_element_end;
+    json_scalar_action scalar;
+} JsonSemAction;
+
+/* functions in jsonfuncs.c */
+extern JsonReturnType json_object_field_text(char *json, char *fieldName, char **result);
+extern JsonReturnType json_extract_path(char **result, char *json, short nargs, ...);
+extern JsonReturnType json_extract_path_text(char **result, char *json, short nargs, ...);
+
+JsonLexContext *makeJsonLexContext(char *json, bool need_escapes);
+JsonLexContext *makeJsonLexContextCstringLen(char *json, int len, bool need_escapes);
+JsonReturnType pg_parse_json(JsonLexContext *lex, JsonSemAction *sem);
+JsonReturnType json_count_array_elements(JsonLexContext *lex, int &count);
+void escape_json(StringInfo buf, const char *str);
+
+
+#endif   /* JSON_H */

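To illustrate the callback interface above (again, only a sketch, not part of the patch): a JsonSemAction that counts the fields of the outermost object. FieldCountState, countField and countTopLevelFields are invented names; the callback signature is the json_ofield_action typedef from the header, and lex_level == 1 identifies fields of the top-level object because parse_object() bumps lex_level before visiting its fields.

    #include <stdlib.h>
    #include <string.h>
    #include "json.h"

    // Example-only state handed to the callback via sem->semstate.
    typedef struct FieldCountState
    {
        JsonLexContext *lex;
        int             count;
    } FieldCountState;

    // json_ofield_action: called once per object field, before its value.
    static JsonReturnType countField(void *state, char *fname, bool isnull)
    {
        FieldCountState *s = (FieldCountState *) state;
        if (s->lex->lex_level == 1)   // only fields of the outermost object
            s->count++;
        return JSON_OK;               // any other return aborts the parse
    }

    // Returns the number of top-level fields, or -1 if 'doc' is not valid JSON.
    static int countTopLevelFields(char *doc)
    {
        FieldCountState state;
        JsonSemAction   sem;
        memset(&sem, 0, sizeof(sem));

        state.lex = makeJsonLexContextCstringLen(doc, strlen(doc), false);
        state.count = 0;
        sem.semstate = &state;
        sem.object_field_start = countField;

        JsonReturnType ret = pg_parse_json(state.lex, &sem);
        int n = (ret == JSON_OK) ? state.count : -1;
        free(state.lex);
        return n;
    }
    // e.g. countTopLevelFields((char *)"{\"a\":1,\"b\":[2,3]}") returns 2.
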
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/jsonfuncs.cpp
----------------------------------------------------------------------
diff --git a/core/sql/common/jsonfuncs.cpp b/core/sql/common/jsonfuncs.cpp
new file mode 100644
index 0000000..fb05505
--- /dev/null
+++ b/core/sql/common/jsonfuncs.cpp
@@ -0,0 +1,757 @@
+#include "stringinfo.h"
+#include "json.h"
+#include <stdarg.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "str.h"
+
+/* semantic action functions for json_get* functions */
+static JsonReturnType get_object_start(void *state);
+static JsonReturnType get_object_end(void *state);
+static JsonReturnType get_object_field_start(void *state, char *fname, bool isnull);
+static JsonReturnType get_object_field_end(void *state, char *fname, bool isnull);
+static JsonReturnType get_array_start(void *state);
+static JsonReturnType get_array_end(void *state);
+static JsonReturnType get_array_element_start(void *state, bool isnull);
+static JsonReturnType get_array_element_end(void *state, bool isnull);
+static JsonReturnType get_scalar(void *state, char *token, JsonTokenType tokentype);
+
+/* common worker function for json getter functions */
+static JsonReturnType get_path_all(bool as_text, char *json, short nargs, va_list args, char **result);
+static JsonReturnType get_worker(char *json, char **tpath, int *ipath, int npath,
+                                 bool normalize_results, char **result);
+
+
+/* semantic action functions for json_array_length */
+static void alen_object_start(void *state);
+static void alen_scalar(void *state, char *token, JsonTokenType tokentype);
+static void alen_array_element_start(void *state, bool isnull);
+
+/* common workers for json{b}_each* functions */
+
+/* semantic action functions for json_each */
+static void each_object_field_start(void *state, char *fname, bool isnull);
+static void each_object_field_end(void *state, char *fname, bool isnull);
+static void each_array_start(void *state);
+static void each_scalar(void *state, char *token, JsonTokenType tokentype);
+
+/* semantic action functions for json_array_elements */
+static void elements_object_start(void *state);
+static void elements_array_element_start(void *state, bool isnull);
+static void elements_array_element_end(void *state, bool isnull);
+static void elements_scalar(void *state, char *token, JsonTokenType tokentype);
+
+/* semantic action functions for json_strip_nulls */
+static void sn_object_start(void *state);
+static void sn_object_end(void *state);
+static void sn_array_start(void *state);
+static void sn_array_end(void *state);
+static void sn_object_field_start(void *state, char *fname, bool isnull);
+static void sn_array_element_start(void *state, bool isnull);
+static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
+
+/* state for json_object_keys */
+typedef struct OkeysState
+{
+    JsonLexContext *lex;
+    char	  **result;
+    int			result_size;
+    int			result_count;
+    int			sent_count;
+} OkeysState;
+
+/* state for json_get* functions */
+typedef struct GetState
+{
+    JsonLexContext *lex;
+    char	   *tresult;
+    char	   *result_start;
+    bool		normalize_results;
+    bool		next_scalar;
+    int			npath;			/* length of each path-related array */
+    char	  **path_names;		/* field name(s) being sought */
+    int		   *path_indexes;	/* array index(es) being sought */
+    bool	   *pathok;			/* is path matched to current depth? */
+    int		   *array_cur_index;	/* current element index at each path level */
+} GetState;
+
+/* state for json_array_length */
+typedef struct AlenState
+{
+    JsonLexContext *lex;
+    int			count;
+} AlenState;
+
+
+/* state for json_array_elements */
+typedef struct ElementsState
+{
+    JsonLexContext *lex;
+    const char *function_name;
+    char	   *result_start;
+    bool		normalize_results;
+    bool		next_scalar;
+    char	   *normalized_scalar;
+} ElementsState;
+
+/* state for json_strip_nulls */
+typedef struct StripnullState
+{
+    JsonLexContext *lex;
+    StringInfo	strval;
+    bool		skip_next_null;
+} StripnullState;
+
+JsonReturnType
+json_extract_path(char **result, char *json, short nargs, ...)
+{
+    JsonReturnType ret = JSON_OK;
+    va_list args;
+    va_start(args, nargs);
+    ret = get_path_all(false, json, nargs, args, result);
+    va_end(args);
+    return ret;
+}
+
+JsonReturnType
+json_extract_path_text(char **result, char *json, short nargs, ...)
+{
+    JsonReturnType ret = JSON_OK;
+    va_list args;
+    va_start(args, nargs);
+    ret = get_path_all(true, json, nargs, args, result);
+    va_end(args);
+    return ret;
+}
+
+/*
+ * common routine for extract_path functions
+ */
+static JsonReturnType
+get_path_all(bool as_text, char *json, short nargs, va_list args, char **result)
+{
+    bool	   *pathnulls;
+    char	  **tpath;
+    int		   *ipath;
+    int			i;
+    JsonReturnType ret = JSON_OK;
+
+    tpath = (char **)malloc(nargs * sizeof(char *));
+    ipath = (int *)malloc(nargs * sizeof(int));
+
+    for (i = 0; i < nargs; i++)
+    {
+        tpath[i] = va_arg(args, char *);
+
+        /*
+         * we have no idea at this stage what structure the document is so
+         * just convert anything in the path that we can to an integer and set
+         * all the other integers to INT_MIN which will never match.
+         */
+        if (*tpath[i] != '\0')
+        {
+            long		ind;
+            char	   *endptr;
+
+            errno = 0;
+            ind = strtol(tpath[i], &endptr, 10);
+            if (*endptr == '\0' && errno == 0 && ind <= INT_MAX && ind >= INT_MIN)
+                ipath[i] = (int) ind;
+            else
+                ipath[i] = INT_MIN;
+        }
+        else
+            ipath[i] = INT_MIN;
+    }
+
+    ret = get_worker(json, tpath, ipath, nargs, as_text, result);
+
+    if (tpath != NULL)
+        free(tpath);
+    if (ipath != NULL)
+        free(ipath);
+    return ret;
+}
+
+JsonReturnType json_object_field_text(char *json, char *fieldName, char **result)
+{
+    return get_worker(json, &fieldName, NULL, 1, true, result);
+}
+
+static JsonReturnType
+get_worker(char *json,
+           char **tpath,
+           int *ipath,
+           int npath,
+           bool normalize_results,
+           char **result)
+{
+    JsonLexContext *lex = makeJsonLexContext(json, true);
+    JsonSemAction *sem = (JsonSemAction *)malloc(sizeof(JsonSemAction));
+    GetState   *state = (GetState *)malloc(sizeof(GetState));
+    JsonReturnType ret;
+
+    memset(sem, 0, sizeof(JsonSemAction));
+    memset(state, 0, sizeof(GetState));
+
+    if(npath < 0)
+        return JSON_UNEXPECTED_ERROR;
+
+    state->lex = lex;
+    /* is it "_as_text" variant? */
+    state->normalize_results = normalize_results;
+    state->npath = npath;
+    state->path_names = tpath;
+    state->path_indexes = ipath;
+    state->pathok = (bool *)malloc(sizeof(bool) * npath);
+    state->array_cur_index = (int *)malloc(sizeof(int) * npath);
+
+    if (npath > 0)
+        state->pathok[0] = true;
+
+    sem->semstate = (void *) state;
+
+    /*
+     * Not all variants need all the semantic routines. Only set the ones that
+     * are actually needed for maximum efficiency.
+     */
+    sem->scalar = get_scalar;
+    if (npath == 0)
+    {
+        sem->object_start = get_object_start;
+        sem->object_end = get_object_end;
+        sem->array_start = get_array_start;
+        sem->array_end = get_array_end;
+    }
+    if (tpath != NULL)
+    {
+        sem->object_field_start = get_object_field_start;
+        sem->object_field_end = get_object_field_end;
+    }
+    if (ipath != NULL)
+    {
+        sem->array_start = get_array_start;
+        sem->array_element_start = get_array_element_start;
+        sem->array_element_end = get_array_element_end;
+    }
+
+    ret = pg_parse_json(lex, sem);
+    if (ret == JSON_OK)
+        *result = state->tresult;
+    else
+        *result = NULL;
+
+    if (lex != NULL)
+    {
+        if (lex->strval != NULL)
+        {
+            if (lex->strval->data != NULL)
+                free(lex->strval->data);
+            free(lex->strval);
+        }
+        free(lex);
+    }
+    if (sem != NULL)
+        free(sem);
+    if (state != NULL)
+        free(state);
+    return ret;
+}
+
+static JsonReturnType
+get_object_start(void *state)
+{
+    GetState   *_state = (GetState *) state;
+    int			lex_level = _state->lex->lex_level;
+
+    if (lex_level == 0 && _state->npath == 0)
+    {
+        /*
+         * Special case: we should match the entire object.  We only need this
+         * at outermost level because at nested levels the match will have
+         * been started by the outer field or array element callback.
+         */
+        _state->result_start = _state->lex->token_start;
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_object_end(void *state)
+{
+    GetState   *_state = (GetState *) state;
+    int			lex_level = _state->lex->lex_level;
+
+    if (lex_level == 0 && _state->npath == 0)
+    {
+        /* Special case: return the entire object */
+        char	   *start = _state->result_start;
+        int			len = _state->lex->prev_token_terminator - start;
+
+        //_state->tresult = cstring_to_text_with_len(start, len);
+        _state->tresult = (char *)malloc(len + 1);
+
+        memcpy(_state->tresult, start, len);
+        _state->tresult[len] = '\0';
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_object_field_start(void *state, char *fname, bool isnull)
+{
+    GetState   *_state = (GetState *) state;
+    bool		get_next = false;
+    int			lex_level = _state->lex->lex_level;
+
+    if (lex_level <= _state->npath &&
+            _state->pathok[lex_level - 1] &&
+            _state->path_names != NULL &&
+            _state->path_names[lex_level - 1] != NULL &&
+            strcmp(fname, _state->path_names[lex_level - 1]) == 0)
+    {
+        if (lex_level < _state->npath)
+        {
+            /* if not at end of path just mark path ok */
+            _state->pathok[lex_level] = true;
+        }
+        else
+        {
+            /* end of path, so we want this value */
+            get_next = true;
+        }
+    }
+
+    if (get_next)
+    {
+        /* this object overrides any previous matching object */
+        _state->tresult = NULL;
+        _state->result_start = NULL;
+
+        if (_state->normalize_results &&
+                _state->lex->token_type == JSON_TOKEN_STRING)
+        {
+            /* for as_text variants, tell get_scalar to set it for us */
+            _state->next_scalar = true;
+        }
+        else
+        {
+            /* for non-as_text variants, just note the json starting point */
+            _state->result_start = _state->lex->token_start;
+        }
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_object_field_end(void *state, char *fname, bool isnull)
+{
+    GetState   *_state = (GetState *) state;
+    bool		get_last = false;
+    int			lex_level = _state->lex->lex_level;
+
+    /* same tests as in get_object_field_start */
+    if (lex_level <= _state->npath &&
+            _state->pathok[lex_level - 1] &&
+            _state->path_names != NULL &&
+            _state->path_names[lex_level - 1] != NULL &&
+            strcmp(fname, _state->path_names[lex_level - 1]) == 0)
+    {
+        if (lex_level < _state->npath)
+        {
+            /* done with this field so reset pathok */
+            _state->pathok[lex_level] = false;
+        }
+        else
+        {
+            /* end of path, so we want this value */
+            get_last = true;
+        }
+    }
+
+    /* for as_text scalar case, our work is already done */
+    if (get_last && _state->result_start != NULL)
+    {
+        /*
+         * make a text object from the string from the previously noted json
+         * start up to the end of the previous token (the lexer is by now
+         * ahead of us on whatever came after what we're interested in).
+         */
+        if (isnull && _state->normalize_results)
+            _state->tresult = (char *) NULL;
+        else
+        {
+            char	   *start = _state->result_start;
+            int			len = _state->lex->prev_token_terminator - start;
+
+            //_state->tresult = cstring_to_text_with_len(start, len);
+
+            _state->tresult = (char *)malloc(len + 1);
+
+            memcpy(_state->tresult, start, len);
+            _state->tresult[len] = '\0';
+        }
+
+        /* this should be unnecessary but let's do it for cleanliness: */
+        _state->result_start = NULL;
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_array_start(void *state)
+{
+    GetState   *_state = (GetState *) state;
+    int			lex_level = _state->lex->lex_level;
+
+    if (lex_level < _state->npath)
+    {
+        /* Initialize counting of elements in this array */
+        _state->array_cur_index[lex_level] = -1;
+
+        /* INT_MIN value is reserved to represent invalid subscript */
+        if (_state->path_indexes[lex_level] < 0 &&
+                _state->path_indexes[lex_level] != INT_MIN)
+        {
+            /* Negative subscript -- convert to the equivalent positive subscript */
+            int		nelements;
+            JsonReturnType ret = json_count_array_elements(_state->lex, nelements);
+            if (ret != JSON_OK)
+                return ret;
+            if (-_state->path_indexes[lex_level] <= nelements)
+                _state->path_indexes[lex_level] += nelements;
+        }
+    }
+    else if (lex_level == 0 && _state->npath == 0)
+    {
+        /*
+         * Special case: we should match the entire array.  We only need this
+         * at the outermost level because at nested levels the match will
+         * have been started by the outer field or array element callback.
+         */
+        _state->result_start = _state->lex->token_start;
+    }
+    return JSON_OK;
+}
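
To make the negative-subscript handling above concrete, here is a tiny standalone sketch of the same arithmetic; nelements and the path index are hypothetical values chosen for illustration, not taken from this commit.

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int nelements  = 3;    /* e.g. the array ["x", "y", "z"] */
        int path_index = -1;   /* "last element" */

        /* same conversion as get_array_start: -1 becomes the last valid subscript */
        if (path_index < 0 && path_index != INT_MIN && -path_index <= nelements)
            path_index += nelements;

        printf("%d\n", path_index);   /* prints 2, i.e. the subscript of "z" */
        return 0;
    }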
+
+static JsonReturnType
+get_array_end(void *state)
+{
+    GetState   *_state = (GetState *) state;
+    int			lex_level = _state->lex->lex_level;
+
+    if (lex_level == 0 && _state->npath == 0)
+    {
+        /* Special case: return the entire array */
+        char	   *start = _state->result_start;
+        int			len = _state->lex->prev_token_terminator - start;
+
+        //_state->tresult = cstring_to_text_with_len(start, len);
+
+        _state->tresult = (char *)malloc(len + 1);
+
+        memcpy(_state->tresult, start, len);
+        _state->tresult[len] = '\0';
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_array_element_start(void *state, bool isnull)
+{
+    GetState   *_state = (GetState *) state;
+    bool		get_next = false;
+    int			lex_level = _state->lex->lex_level;
+
+    /* Update array element counter */
+    if (lex_level <= _state->npath)
+        _state->array_cur_index[lex_level - 1]++;
+
+    if (lex_level <= _state->npath &&
+            _state->pathok[lex_level - 1] &&
+            _state->path_indexes != NULL &&
+            _state->array_cur_index[lex_level - 1] == _state->path_indexes[lex_level - 1])
+    {
+        if (lex_level < _state->npath)
+        {
+            /* if not at end of path just mark path ok */
+            _state->pathok[lex_level] = true;
+        }
+        else
+        {
+            /* end of path, so we want this value */
+            get_next = true;
+        }
+    }
+
+    /* same logic as for objects */
+    if (get_next)
+    {
+        _state->tresult = NULL;
+        _state->result_start = NULL;
+
+        if (_state->normalize_results &&
+                _state->lex->token_type == JSON_TOKEN_STRING)
+            _state->next_scalar = true;
+        else
+            _state->result_start = _state->lex->token_start;
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_array_element_end(void *state, bool isnull)
+{
+    GetState   *_state = (GetState *) state;
+    bool		get_last = false;
+    int			lex_level = _state->lex->lex_level;
+
+    /* same tests as in get_array_element_start */
+    if (lex_level <= _state->npath &&
+            _state->pathok[lex_level - 1] &&
+            _state->path_indexes != NULL &&
+            _state->array_cur_index[lex_level - 1] == _state->path_indexes[lex_level - 1])
+    {
+        if (lex_level < _state->npath)
+        {
+            /* done with this element so reset pathok */
+            _state->pathok[lex_level] = false;
+        }
+        else
+        {
+            /* end of path, so we want this value */
+            get_last = true;
+        }
+    }
+
+    /* same logic as for objects */
+    if (get_last && _state->result_start != NULL)
+    {
+        if (isnull && _state->normalize_results)
+            _state->tresult = (char *) NULL;
+        else
+        {
+            char	   *start = _state->result_start;
+            int			len = _state->lex->prev_token_terminator - start;
+
+            //_state->tresult = cstring_to_text_with_len(start, len);
+
+            _state->tresult = (char *)malloc(len + 1);
+
+            memcpy(_state->tresult, start, len);
+            _state->tresult[len] = '\0';
+        }
+
+        _state->result_start = NULL;
+    }
+    return JSON_OK;
+}
+
+static JsonReturnType
+get_scalar(void *state, char *token, JsonTokenType tokentype)
+{
+    GetState   *_state = (GetState *) state;
+    int			lex_level = _state->lex->lex_level;
+
+    /* Check for whole-object match */
+    if (lex_level == 0 && _state->npath == 0)
+    {
+        if (_state->normalize_results && tokentype == JSON_TOKEN_STRING)
+        {
+            /* we want the de-escaped string */
+            _state->next_scalar = true;
+        }
+        else if (_state->normalize_results && tokentype == JSON_TOKEN_NULL)
+            _state->tresult = (char *) NULL;
+        else
+        {
+            /*
+             * This is a bit hokey: we will suppress whitespace after the
+             * scalar token, but not whitespace before it.  Probably not worth
+             * doing our own space-skipping to avoid that.
+             */
+            char	   *start = _state->lex->input;
+            int			len = _state->lex->prev_token_terminator - start;
+
+            //_state->tresult = cstring_to_text_with_len(start, len);
+
+            _state->tresult = (char *)malloc(len + 1);
+
+            memcpy(_state->tresult, start, len);
+            _state->tresult[len] = '\0';
+        }
+    }
+
+    if (_state->next_scalar)
+    {
+        /* a de-escaped text value is wanted, so supply it */
+        //_state->tresult = cstring_to_text(token);
+        //_state->tresult = token;
+        int len = str_len(token);
+        _state->tresult = (char *)malloc(len + 1);
+
+        memcpy(_state->tresult, token, len);
+        _state->tresult[len] = '\0';
+        /* make sure the next call to get_scalar doesn't overwrite it */
+        _state->next_scalar = false;
+    }
+    return JSON_OK;
+}
+
+/*
+ * These next two callbacks are where a structure check would reject a json
+ * value that is not an array (i.e. a scalar or an object); here they only
+ * look at the lex level and return without raising an error.
+ */
+
+static void
+alen_object_start(void *state)
+{
+    AlenState  *_state = (AlenState *) state;
+
+    /* json structure check */
+    if (_state->lex->lex_level == 0)
+        return;
+}
+
+static void
+alen_scalar(void *state, char *token, JsonTokenType tokentype)
+{
+    AlenState  *_state = (AlenState *) state;
+
+    /* json structure check */
+    if (_state->lex->lex_level == 0)
+        return;
+}
+
+static void
+alen_array_element_start(void *state, bool isnull)
+{
+    AlenState  *_state = (AlenState *) state;
+
+    /* just count up all the level 1 elements */
+    if (_state->lex->lex_level == 1)
+        _state->count++;
+}
+
+
+static void
+elements_object_start(void *state)
+{
+    ElementsState *_state = (ElementsState *) state;
+
+    /* json structure check */
+    if (_state->lex->lex_level == 0)
+        return;
+}
+
+static void
+elements_scalar(void *state, char *token, JsonTokenType tokentype)
+{
+    ElementsState *_state = (ElementsState *) state;
+
+    /* json structure check */
+    if (_state->lex->lex_level == 0)
+        return;
+
+    /* supply de-escaped value if required */
+    if (_state->next_scalar)
+        _state->normalized_scalar = token;
+}
+
+
+/*
+ * Semantic actions for json_strip_nulls.
+ *
+ * Simply repeat the input on the output unless we encounter
+ * a null object field. State for this is set when the field
+ * is started and reset when the scalar action (which must be next)
+ * is called.
+ */
+
+static void
+sn_object_start(void *state)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    appendStringInfoCharMacro(_state->strval, '{');
+}
+
+static void
+sn_object_end(void *state)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    appendStringInfoCharMacro(_state->strval, '}');
+}
+
+static void
+sn_array_start(void *state)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    appendStringInfoCharMacro(_state->strval, '[');
+}
+
+static void
+sn_array_end(void *state)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    appendStringInfoCharMacro(_state->strval, ']');
+}
+
+static void
+sn_object_field_start(void *state, char *fname, bool isnull)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    if (isnull)
+    {
+        /*
+         * The next thing must be a scalar or isnull couldn't be true, so
+         * there is no danger of this state being carried down into a nested
+         * object or array. The flag will be reset in the scalar action.
+         */
+        _state->skip_next_null = true;
+        return;
+    }
+
+    if (_state->strval->data[_state->strval->len - 1] != '{')
+        appendStringInfoCharMacro(_state->strval, ',');
+
+    /*
+     * Unfortunately we don't have the quoted and escaped string any more, so
+     * we have to re-escape it.
+     */
+    escape_json(_state->strval, fname);
+
+    appendStringInfoCharMacro(_state->strval, ':');
+}
+
+static void
+sn_array_element_start(void *state, bool isnull)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    if (_state->strval->data[_state->strval->len - 1] != '[')
+        appendStringInfoCharMacro(_state->strval, ',');
+}
+
+static void
+sn_scalar(void *state, char *token, JsonTokenType tokentype)
+{
+    StripnullState *_state = (StripnullState *) state;
+
+    if (_state->skip_next_null)
+    {
+        _state->skip_next_null = false;
+        return;
+    }
+
+    if (tokentype == JSON_TOKEN_STRING)
+        escape_json(_state->strval, token);
+    else
+        appendStringInfoString(_state->strval, token);
+}
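
Taken together, these callbacks give PostgreSQL-style json_strip_nulls behavior: a null-valued object field is dropped along with its key, while a null inside an array is kept. A hedged sketch of the expected effect follows; the entry point name and its C signature are illustrative assumptions, not part of this diff.

    #include <stdio.h>
    #include "json.h"   /* assumed to declare JsonReturnType and JSON_OK */

    /* Hypothetical wrapper assumed to be wired to the sn_* actions above. */
    extern JsonReturnType json_strip_nulls(char *json, char **result);

    int main(void)
    {
        char in[]  = "{\"a\": 1, \"b\": null, \"c\": [null, 2]}";
        char *out = NULL;

        if (json_strip_nulls(in, &out) == JSON_OK)
            printf("%s\n", out);   /* expected: {"a":1,"c":[null,2]} */
        return 0;
    }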

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/stringinfo.cpp
----------------------------------------------------------------------
diff --git a/core/sql/common/stringinfo.cpp b/core/sql/common/stringinfo.cpp
new file mode 100644
index 0000000..b682f52
--- /dev/null
+++ b/core/sql/common/stringinfo.cpp
@@ -0,0 +1,258 @@
+#include "stringinfo.h"
+#include <stdlib.h>
+#include <string.h>
+#include "str.h"
+
+/*
+ * makeStringInfo
+ *
+ * Create an empty 'StringInfoData' & return a pointer to it.
+ */
+StringInfo
+makeStringInfo(void)
+{
+    StringInfo	res;
+
+    res = (StringInfo) malloc(sizeof(StringInfoData));
+
+    initStringInfo(res);
+
+    return res;
+}
+
+/*
+ * initStringInfo
+ *
+ * Initialize a StringInfoData struct (with previously undefined contents)
+ * to describe an empty string.
+ */
+void
+initStringInfo(StringInfo str)
+{
+    int			size = 1024;	/* initial default buffer size */
+
+    str->data = (char *) malloc(size);
+    str->maxlen = size;
+    resetStringInfo(str);
+}
+
+/*
+ * resetStringInfo
+ *
+ * Reset the StringInfo: the data buffer remains valid, but its
+ * previous content, if any, is cleared.
+ */
+void
+resetStringInfo(StringInfo str)
+{
+    str->data[0] = '\0';
+    str->len = 0;
+    str->cursor = 0;
+}
+
+/*
+ * appendStringInfo
+ *
+ * Format text data under the control of fmt (an sprintf-style format string)
+ * and append it to whatever is already in str.  More space is allocated
+ * to str if necessary.  This is sort of like a combination of sprintf and
+ * strcat.
+ */
+void
+appendStringInfo(StringInfo str, const char *fmt, ...)
+{
+    for (;;)
+    {
+        va_list		args;
+        int			needed;
+
+        /* Try to format the data. */
+        va_start(args, fmt);
+        needed = appendStringInfoVA(str, fmt, args);
+        va_end(args);
+
+        if (needed == 0)
+            break;				/* success */
+
+        /* Increase the buffer size and try again. */
+        enlargeStringInfo(str, needed);
+    }
+}
+
+/*
+ * appendStringInfoVA
+ *
+ * Attempt to format text data under the control of fmt (an sprintf-style
+ * format string) and append it to whatever is already in str.  If successful
+ * return zero; if not (because there's not enough space), return an estimate
+ * of the space needed, without modifying str.  Typically the caller should
+ * pass the return value to enlargeStringInfo() before trying again; see
+ * appendStringInfo for standard usage pattern.
+ *
+ * XXX This API is ugly, but there seems no alternative given the C spec's
+ * restrictions on what can portably be done with va_list arguments: you have
+ * to redo va_start before you can rescan the argument list, and we can't do
+ * that from here.
+ */
+int
+appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
+{
+    int			avail;
+    size_t		nprinted;
+
+    if (str == NULL)
+        return 0;
+
+    /*
+     * If there's hardly any space, don't bother trying, just fail to make the
+     * caller enlarge the buffer first.  We have to guess at how much to
+     * enlarge, since we're skipping the formatting work.
+     */
+    avail = str->maxlen - str->len;
+    if (avail < 16)
+        return 32;
+
+    //nprinted = pvsnprintf(str->data + str->len, (size_t) avail, fmt, args);
+    nprinted = vsnprintf(str->data + str->len, (size_t) avail, fmt, args);
+
+    if (nprinted < (size_t) avail)
+    {
+        /* Success.  Note nprinted does not include trailing null. */
+        str->len += (int) nprinted;
+        return 0;
+    }
+
+    /* Restore the trailing null so that str is unmodified. */
+    str->data[str->len] = '\0';
+
+    /*
+     * Return vsnprintf's estimate of the space needed (the number of bytes
+     * that would have been written, not counting the trailing null).
+     */
+    return (int) nprinted;
+}
+
+/*
+ * appendStringInfoChar
+ *
+ * Append a single byte to str.
+ * Like appendStringInfo(str, "%c", ch) but much faster.
+ */
+void
+appendStringInfoChar(StringInfo str, char ch)
+{
+    /* Make more room if needed */
+    if (str->len + 1 >= str->maxlen)
+        enlargeStringInfo(str, 1);
+
+    /* OK, append the character */
+    str->data[str->len] = ch;
+    str->len++;
+    str->data[str->len] = '\0';
+}
+
+
+/*
+ * appendStringInfoString
+ *
+ * Append a null-terminated string to str.
+ * Like appendStringInfo(str, "%s", s) but faster.
+ */
+void
+appendStringInfoString(StringInfo str, const char *s)
+{
+    appendBinaryStringInfo(str, s, str_len(s));
+}
+
+
+/*
+ * appendBinaryStringInfo
+ *
+ * Append arbitrary binary data to a StringInfo, allocating more space
+ * if necessary.
+ */
+void
+appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
+{
+    //ASSERT(str != NULL);
+
+    /* Make more room if needed */
+    enlargeStringInfo(str, datalen);
+
+    /* OK, append the data */
+    memcpy(str->data + str->len, data, datalen);
+    str->len += datalen;
+
+    /*
+     * Keep a trailing null in place, even though it's probably useless for
+     * binary data.  (Some callers are dealing with text but call this because
+     * their input isn't null-terminated.)
+     */
+    str->data[str->len] = '\0';
+}
+
+
+/*
+ * enlargeStringInfo
+ *
+ * Make sure there is enough space for 'needed' more bytes
+ * ('needed' does not include the terminating null).
+ *
+ * External callers usually need not concern themselves with this, since
+ * all stringinfo.cpp routines do it automatically.  However, if a caller
+ * knows that a StringInfo will eventually become X bytes large, it
+ * can save some reallocation overhead by enlarging the buffer before
+ * starting to store data in it.
+ *
+ * NB: the buffer is grown with realloc(), so its existing contents are
+ * preserved across enlargement.
+ */
+void
+enlargeStringInfo(StringInfo str, int needed)
+{
+    int			newlen;
+
+    /*
+     * Guard against out-of-range "needed" values.  Without this, we can get
+     * an overflow or infinite loop in the following.
+     */
+    if (needed < 0)				/* should not happen */
+        return;
+    if (((size_t) needed) >= (MaxAllocSize - (size_t) str->len))
+        return;
+
+    needed += str->len + 1;		/* total space required now */
+
+    /* Because of the above test, we now have needed <= MaxAllocSize */
+
+    if (needed <= str->maxlen)
+        return;					/* got enough space already */
+
+    /*
+     * We don't want to allocate just a little more space with each append;
+     * for efficiency, double the buffer size each time it overflows.
+     * Actually, we might need to more than double it if 'needed' is big...
+     */
+    newlen = 2 * str->maxlen;
+    while (needed > newlen)
+        newlen = 2 * newlen;
+
+    /*
+     * Clamp to MaxAllocSize in case we went past it.  Note we are assuming
+     * here that MaxAllocSize <= INT_MAX/2, else the above loop could
+     * overflow.  We will still have newlen >= needed.
+     */
+    if (newlen > (int) MaxAllocSize)
+        newlen = (int) MaxAllocSize;
+
+    /* realloc (rather than free+malloc) so the existing contents survive */
+    str->data = (char *) realloc(str->data, newlen);
+
+    str->maxlen = newlen;
+}
+
+
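
For orientation, a minimal usage sketch of the routines above; the driver below is illustrative only and assumes the stringinfo.h added in this commit is on the include path.

    #include <stdio.h>
    #include <stdlib.h>
    #include "stringinfo.h"

    /* Build a small JSON fragment piece by piece, the same way the json
       code drives these routines. */
    int main(void)
    {
        StringInfoData buf;

        initStringInfo(&buf);
        appendStringInfoChar(&buf, '{');
        appendStringInfoString(&buf, "\"count\":");
        appendStringInfo(&buf, "%d", 42);
        appendStringInfoCharMacro(&buf, '}');

        printf("%s (len=%d)\n", buf.data, buf.len);   /* {"count":42} (len=12) */

        free(buf.data);   /* the data buffer is malloc'd; the caller owns it */
        return 0;
    }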

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/common/stringinfo.h
----------------------------------------------------------------------
diff --git a/core/sql/common/stringinfo.h b/core/sql/common/stringinfo.h
new file mode 100644
index 0000000..d61a6ca
--- /dev/null
+++ b/core/sql/common/stringinfo.h
@@ -0,0 +1,137 @@
+#ifndef STRINGINFO_H
+#define STRINGINFO_H
+
+#include <sys/types.h>
+#include <stdarg.h>
+
+#ifndef NULL
+#define NULL	0
+#endif
+
+/*-------------------------
+ * StringInfoData holds information about an extensible string.
+ *		data	is the current buffer for the string (allocated with malloc).
+ *		len		is the current string length.  There is guaranteed to be
+ *				a terminating '\0' at data[len], although this is not very
+ *				useful when the string holds binary data rather than text.
+ *		maxlen	is the allocated size in bytes of 'data', i.e. the maximum
+ *				string size (including the terminating '\0' char) that we can
+ *				currently store in 'data' without having to reallocate
+ *				more space.  We must always have maxlen > len.
+ *		cursor	is initialized to zero by makeStringInfo or initStringInfo,
+ *				but is not otherwise touched by the stringinfo.c routines.
+ *				Some routines use it to scan through a StringInfo.
+ *-------------------------
+ */
+typedef struct StringInfoData
+{
+    char	   *data;
+    int			len;
+    int			maxlen;
+    int			cursor;
+} StringInfoData;
+
+typedef StringInfoData *StringInfo;
+
+#define MaxAllocSize	((size_t) 0x3fffffff)		/* 1 gigabyte - 1 */
+
+/*------------------------
+ * There are two ways to create a StringInfo object initially:
+ *
+ * StringInfo stringptr = makeStringInfo();
+ *		Both the StringInfoData and the data buffer are malloc'd.
+ *
+ * StringInfoData string;
+ * initStringInfo(&string);
+ *		The data buffer is malloc'd but the StringInfoData is just local.
+ *		This is the easiest approach for a StringInfo object that will
+ *		only live as long as the current routine.
+ *
+ * To destroy a StringInfo, free() the data buffer, and then free() the
+ * StringInfoData if it was malloc'd.  There's no special support for this.
+ *
+ * NOTE: some routines build up a string using StringInfo, and then
+ * release the StringInfoData but return the data string itself to their
+ * caller.  At that point the data string looks like a plain malloc'd
+ * string.
+ *-------------------------
+ */
+
+/*------------------------
+ * makeStringInfo
+ * Create an empty 'StringInfoData' & return a pointer to it.
+ */
+extern StringInfo makeStringInfo(void);
+
+/*------------------------
+ * initStringInfo
+ * Initialize a StringInfoData struct (with previously undefined contents)
+ * to describe an empty string.
+ */
+extern void initStringInfo(StringInfo str);
+
+/*------------------------
+ * resetStringInfo
+ * Clears the current content of the StringInfo, if any. The
+ * StringInfo remains valid.
+ */
+extern void resetStringInfo(StringInfo str);
+
+/*------------------------
+ * appendStringInfoString
+ * Append a null-terminated string to str.
+ * Like appendStringInfo(str, "%s", s) but faster.
+ */
+extern void appendStringInfoString(StringInfo str, const char *s);
+
+/*------------------------
+ * appendStringInfoChar
+ * Append a single byte to str.
+ * Like appendStringInfo(str, "%c", ch) but much faster.
+ */
+extern void appendStringInfoChar(StringInfo str, char ch);
+/*------------------------
+ * appendBinaryStringInfo
+ * Append arbitrary binary data to a StringInfo, allocating more space
+ * if necessary.
+ */
+extern void appendBinaryStringInfo(StringInfo str,
+                                   const char *data, int datalen);
+/*------------------------
+ * appendStringInfo
+ * Format text data under the control of fmt (an sprintf-style format string)
+ * and append it to whatever is already in str.  More space is allocated
+ * to str if necessary.  This is sort of like a combination of sprintf and
+ * strcat.
+ */
+extern void appendStringInfo(StringInfo str, const char *fmt,...);
+
+/*------------------------
+ * appendStringInfoVA
+ * Attempt to format text data under the control of fmt (an sprintf-style
+ * format string) and append it to whatever is already in str.  If successful
+ * return zero; if not (because there's not enough space), return an estimate
+ * of the space needed, without modifying str.  Typically the caller should
+ * pass the return value to enlargeStringInfo() before trying again; see
+ * appendStringInfo for standard usage pattern.
+ */
+extern int	appendStringInfoVA(StringInfo str, const char *fmt, va_list args);
+
+
+/*------------------------
+ * enlargeStringInfo
+ * Make sure a StringInfo's buffer can hold at least 'needed' more bytes.
+ */
+extern void enlargeStringInfo(StringInfo str, int needed);
+
+/*------------------------
+ * appendStringInfoCharMacro
+ * As above, but a macro for even more speed where it matters.
+ * Caution: str argument will be evaluated multiple times.
+ */
+#define appendStringInfoCharMacro(str,ch) \
+	(((str)->len + 1 >= (str)->maxlen) ? \
+	 appendStringInfoChar(str, ch) : \
+	 (void)((str)->data[(str)->len] = (ch), (str)->data[++(str)->len] = '\0'))
+
+#endif   /* STRINGINFO_H */
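
Because the macro above evaluates its str argument more than once, callers should pass a simple lvalue. The sketch below is illustrative only; the bufs array and the helper function are hypothetical.

    #include <stdlib.h>
    #include "stringinfo.h"

    static void macro_caution_sketch(void)
    {
        StringInfo bufs[2] = { makeStringInfo(), makeStringInfo() };
        int i = 0;

        /* Fine: the argument is a plain variable, safe to evaluate repeatedly. */
        appendStringInfoCharMacro(bufs[0], 'a');

        /* An argument with side effects, such as bufs[i++], would be evaluated
           more than once inside the macro; use the function form instead. */
        appendStringInfoChar(bufs[i], 'b');
    }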

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/exp/ExpErrorEnums.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpErrorEnums.h b/core/sql/exp/ExpErrorEnums.h
index 33437e9..8834d23 100644
--- a/core/sql/exp/ExpErrorEnums.h
+++ b/core/sql/exp/ExpErrorEnums.h
@@ -553,6 +553,22 @@ enum ExeErrorCode
   CLI_RWRS_DECOMPRESS_ERROR        = 30045,
   CLI_RWRS_DECOMPRESS_LENGTH_ERROR = 30046,
   CLI_NAR_ERROR_DETAILS            = 30047,
+// ---------------------------------------------------------------------
+// Execution errors related to the JSON parser
+// ---------------------------------------------------------------------
+  
+  EXE_JSON_INVALID_TOKEN                  = 32001,
+  EXE_JSON_INVALID_VALUE                  = 32002,
+  EXE_JSON_INVALID_STRING                 = 32003,
+  EXE_JSON_INVALID_ARRAY_START            = 32004,
+  EXE_JSON_INVALID_ARRAY_NEXT             = 32005,
+  EXE_JSON_INVALID_OBJECT_START           = 32006,
+  EXE_JSON_INVALID_OBJECT_LABEL           = 32007,
+  EXE_JSON_INVALID_OBJECT_NEXT            = 32008,
+  EXE_JSON_INVALID_OBJECT_COMMA           = 32009,
+  EXE_JSON_INVALID_END                    = 32010,
+  EXE_JSON_END_PREMATURELY                = 32011,
+  EXE_JSON_UNEXPECTED_ERROR               = 32012,
 
   // ---------------------------------------------------------------------
   // the trailer (use temporarily for new errors that aren't added yet)

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/exp/ExpPackDefs.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpPackDefs.cpp b/core/sql/exp/ExpPackDefs.cpp
index d87497a..9997f4f 100644
--- a/core/sql/exp/ExpPackDefs.cpp
+++ b/core/sql/exp/ExpPackDefs.cpp
@@ -455,6 +455,12 @@ NA_EIDPROC Long ex_function_nvl::pack(void * space_)
   return packClause(space_, sizeof(ex_function_nvl));
 }  
 
+NA_EIDPROC Long ex_function_json_object_field_text::pack(void * space_)
+{
+  return packClause(space_, sizeof(ex_function_json_object_field_text));
+}  
+
+
 NA_EIDPROC Long ex_function_queryid_extract::pack(void * space_)
 {
   return packClause(space_, sizeof(ex_function_queryid_extract));

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/exp/exp_clause.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/exp_clause.cpp b/core/sql/exp/exp_clause.cpp
index c527c77..ee621a2 100644
--- a/core/sql/exp/exp_clause.cpp
+++ b/core/sql/exp/exp_clause.cpp
@@ -493,6 +493,10 @@ ex_clause::ex_clause(clause_type type,
 	case ITM_NVL:
 	  setClassID(FUNC_NVL);
 	  break;
+	case ITM_JSONOBJECTFIELDTEXT:
+	  setClassID(FUNC_JSON_ID);
+	  break;
+
 	case ITM_EXTRACT_COLUMNS:
 	  setClassID(FUNC_EXTRACT_COLUMNS);
 	  break;
@@ -933,6 +937,9 @@ NA_EIDPROC char *ex_clause::findVTblPtr(short classID)
     case ex_clause::FUNC_NVL:
       GetVTblPtr(vtblPtr, ex_function_nvl);
       break;
+    case ex_clause::FUNC_JSON_ID:
+      GetVTblPtr(vtblPtr, ex_function_json_object_field_text);
+      break;
     case ex_clause::FUNC_EXTRACT_COLUMNS:
       GetVTblPtr(vtblPtr, ExFunctionExtractColumns);
       break;
@@ -1426,6 +1433,8 @@ NA_EIDPROC const char * getOperTypeEnumAsString(Int16 /*OperatorTypeEnum*/ ote)
     case ITM_NULLIFZERO: return "ITM_NULLIFZERO";
     case ITM_NVL: return "ITM_NVL";
 
+    case ITM_JSONOBJECTFIELDTEXT: return "ITM_JSONOBJECTFIELDTEXT";
+
     // subqueries
     case ITM_ROW_SUBQUERY: return "ITM_ROW_SUBQUERY";
     case ITM_IN_SUBQUERY: return "ITM_IN_SUBQUERY";

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/8a49f900/core/sql/exp/exp_clause.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/exp_clause.h b/core/sql/exp/exp_clause.h
index 275914b..b184cce 100644
--- a/core/sql/exp/exp_clause.h
+++ b/core/sql/exp/exp_clause.h
@@ -207,7 +207,8 @@ public:
     FUNC_CRC32_ID            = 122,
     FUNC_MD5_ID              = 123,
     FUNC_SHA1_ID             = 124,
-    FUNC_SHA2_ID             = 125
+    FUNC_SHA2_ID             = 125,
+    FUNC_JSON_ID             = 126
   };
 
   // max number of operands (including result) in a clause.