Posted to commits@hive.apache.org by jd...@apache.org on 2016/05/05 20:10:40 UTC
[01/20] hive git commit: HIVE-13516: Adding BTEQ .IF, .QUIT,
ERRORCODE to HPL/SQL (Dmitry Tolpeko reviewed by Alan Gates)
Repository: hive
Updated Branches:
refs/heads/llap 03ee0481a -> 763e6969d
HIVE-13516: Adding BTEQ .IF, .QUIT, ERRORCODE to HPL/SQL (Dmitry Tolpeko reviewed by Alan Gates)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2d33d091
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2d33d091
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2d33d091
Branch: refs/heads/llap
Commit: 2d33d091b61dce092543970e62f41b63af1f32d1
Parents: 8729966
Author: Dmitry Tolpeko <dm...@gmail.com>
Authored: Wed May 4 03:13:18 2016 -0700
Committer: Dmitry Tolpeko <dm...@gmail.com>
Committed: Wed May 4 03:13:18 2016 -0700
----------------------------------------------------------------------
.../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 108 ++++++++++---
.../main/java/org/apache/hive/hplsql/Exec.java | 67 +++++++-
.../java/org/apache/hive/hplsql/Expression.java | 31 ++--
.../java/org/apache/hive/hplsql/Select.java | 31 ++--
.../java/org/apache/hive/hplsql/Signal.java | 2 +-
.../main/java/org/apache/hive/hplsql/Stmt.java | 154 ++++++++++++-------
hplsql/src/main/resources/hplsql-site.xml | 2 -
.../org/apache/hive/hplsql/TestHplsqlLocal.java | 5 +
.../apache/hive/hplsql/TestHplsqlOffline.java | 20 +++
hplsql/src/test/queries/local/if3_bteq.sql | 3 +
.../test/queries/offline/create_table_td.sql | 45 ++++++
hplsql/src/test/queries/offline/delete_all.sql | 1 +
hplsql/src/test/queries/offline/select.sql | 42 +++++
.../test/queries/offline/select_teradata.sql | 12 ++
hplsql/src/test/results/db/select_into.out.txt | 3 +-
hplsql/src/test/results/db/select_into2.out.txt | 4 +-
hplsql/src/test/results/local/if3_bteq.out.txt | 3 +
hplsql/src/test/results/local/lang.out.txt | 10 +-
.../results/offline/create_table_mssql.out.txt | 39 ++---
.../results/offline/create_table_mssql2.out.txt | 13 +-
.../results/offline/create_table_mysql.out.txt | 5 +-
.../results/offline/create_table_ora.out.txt | 65 ++++----
.../results/offline/create_table_ora2.out.txt | 9 +-
.../results/offline/create_table_pg.out.txt | 7 +-
.../results/offline/create_table_td.out.txt | 31 ++++
.../src/test/results/offline/delete_all.out.txt | 2 +
hplsql/src/test/results/offline/select.out.txt | 34 ++++
.../src/test/results/offline/select_db2.out.txt | 3 +-
.../results/offline/select_teradata.out.txt | 10 ++
29 files changed, 589 insertions(+), 172 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
----------------------------------------------------------------------
diff --git a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
index b84116f..5ce0e23 100644
--- a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
+++ b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
@@ -30,7 +30,7 @@ single_block_stmt : // Single BEGIN END block
T_BEGIN block exception_block? block_end
| stmt T_SEMICOLON?
;
-
+
block_end :
{!_input.LT(2).getText().equalsIgnoreCase("TRANSACTION")}? T_END
;
@@ -48,6 +48,7 @@ stmt :
| begin_transaction_stmt
| break_stmt
| call_stmt
+ | collect_stats_stmt
| close_stmt
| cmp_stmt
| copy_from_ftp_stmt
@@ -83,6 +84,7 @@ stmt :
| merge_stmt
| open_stmt
| print_stmt
+ | quit_stmt
| raise_stmt
| resignal_stmt
| return_stmt
@@ -181,9 +183,9 @@ declare_block_inplace :
declare_stmt_item :
declare_cursor_item
- | declare_var_item
| declare_condition_item
| declare_handler_item
+ | declare_var_item
| declare_temporary_table_item
;
@@ -213,15 +215,19 @@ declare_handler_item : // Condition handler declaration
;
declare_temporary_table_item : // DECLARE TEMPORARY TABLE statement
- T_GLOBAL? T_TEMPORARY T_TABLE ident (T_AS? T_OPEN_P select_stmt T_CLOSE_P | T_AS? select_stmt | T_OPEN_P create_table_columns T_CLOSE_P) create_table_options?
+ T_GLOBAL? T_TEMPORARY T_TABLE ident create_table_preoptions? create_table_definition
;
create_table_stmt :
- T_CREATE T_TABLE (T_IF T_NOT T_EXISTS)? table_name T_OPEN_P create_table_columns T_CLOSE_P create_table_options?
+ T_CREATE T_TABLE (T_IF T_NOT T_EXISTS)? table_name create_table_preoptions? create_table_definition
;
create_local_temp_table_stmt :
- T_CREATE (T_LOCAL T_TEMPORARY | (T_SET | T_MULTISET)? T_VOLATILE) T_TABLE ident create_table_preoptions? T_OPEN_P create_table_columns T_CLOSE_P create_table_options?
+ T_CREATE (T_LOCAL T_TEMPORARY | (T_SET | T_MULTISET)? T_VOLATILE) T_TABLE ident create_table_preoptions? create_table_definition
+ ;
+
+create_table_definition :
+ (T_AS? T_OPEN_P select_stmt T_CLOSE_P | T_AS? select_stmt | T_OPEN_P create_table_columns T_CLOSE_P) create_table_options?
;
create_table_columns :
@@ -262,7 +268,7 @@ create_table_preoptions :
;
create_table_preoptions_item :
- T_NO? T_LOG
+ T_NO? (T_LOG | T_FALLBACK)
;
create_table_options :
@@ -273,6 +279,7 @@ create_table_options_item :
T_ON T_COMMIT (T_DELETE | T_PRESERVE) T_ROWS
| create_table_options_ora_item
| create_table_options_db2_item
+ | create_table_options_td_item
| create_table_options_hive_item
| create_table_options_mssql_item
| create_table_options_mysql_item
@@ -296,6 +303,11 @@ create_table_options_db2_item :
| T_DEFINITION T_ONLY
;
+create_table_options_td_item :
+ T_UNIQUE? T_PRIMARY T_INDEX T_OPEN_P ident (T_COMMA ident)* T_CLOSE_P
+ | T_WITH T_DATA
+ ;
+
create_table_options_hive_item :
create_table_hive_row_format
;
@@ -379,7 +391,7 @@ dtype : // Data types
| T_VARCHAR
| T_VARCHAR2
| T_XML
- | L_ID ('%' (T_TYPE | T_ROWTYPE))? // User-defined or derived data type
+ | ident ('%' (T_TYPE | T_ROWTYPE))? // User-defined or derived data type
;
dtype_len : // Data type length or size specification
@@ -450,7 +462,9 @@ create_routine_params :
T_OPEN_P T_CLOSE_P
| T_OPEN_P create_routine_param_item (T_COMMA create_routine_param_item)* T_CLOSE_P
| {!_input.LT(1).getText().equalsIgnoreCase("IS") &&
- !_input.LT(1).getText().equalsIgnoreCase("AS")}?
+ !_input.LT(1).getText().equalsIgnoreCase("AS") &&
+ !(_input.LT(1).getText().equalsIgnoreCase("DYNAMIC") && _input.LT(2).getText().equalsIgnoreCase("RESULT"))
+ }?
create_routine_param_item (T_COMMA create_routine_param_item)*
;
@@ -484,6 +498,7 @@ exec_stmt : // EXEC, EXECUTE IMMEDIATE statement
if_stmt : // IF statement
if_plsql_stmt
| if_tsql_stmt
+ | if_bteq_stmt
;
if_plsql_stmt :
@@ -494,13 +509,17 @@ if_tsql_stmt :
T_IF bool_expr single_block_stmt (T_ELSE single_block_stmt)?
;
+if_bteq_stmt :
+ '.' T_IF bool_expr T_THEN single_block_stmt
+ ;
+
elseif_block :
(T_ELSIF | T_ELSEIF) bool_expr T_THEN block
;
else_block :
T_ELSE block
- ;
+ ;
include_stmt : // INCLUDE statement
T_INCLUDE (file_name | expr)
@@ -571,6 +590,14 @@ fetch_stmt : // FETCH cursor statement
T_FETCH T_FROM? L_ID T_INTO L_ID (T_COMMA L_ID)*
;
+collect_stats_stmt :
+ T_COLLECT (T_STATISTICS | T_STATS) T_ON table_name collect_stats_clause?
+ ;
+
+collect_stats_clause :
+ T_COLUMN T_OPEN_P ident (T_COMMA ident)* T_CLOSE_P
+ ;
+
close_stmt : // CLOSE cursor statement
T_CLOSE L_ID
;
@@ -652,6 +679,10 @@ print_stmt : // PRINT statement
T_PRINT expr
| T_PRINT T_OPEN_P expr T_CLOSE_P
;
+
+quit_stmt :
+ '.'? T_QUIT expr?
+ ;
raise_stmt :
T_RAISE
@@ -761,7 +792,7 @@ fullselect_set_clause :
;
subselect_stmt :
- (T_SELECT | T_SEL) select_list into_clause? from_clause? where_clause? group_by_clause? having_clause? order_by_clause? select_options?
+ (T_SELECT | T_SEL) select_list into_clause? from_clause? where_clause? group_by_clause? (having_clause | qualify_clause)? order_by_clause? select_options?
;
select_list :
@@ -834,6 +865,8 @@ from_table_values_row:
from_alias_clause :
{!_input.LT(1).getText().equalsIgnoreCase("EXEC") &&
!_input.LT(1).getText().equalsIgnoreCase("EXECUTE") &&
+ !_input.LT(1).getText().equalsIgnoreCase("INNER") &&
+ !_input.LT(1).getText().equalsIgnoreCase("LEFT") &&
!_input.LT(1).getText().equalsIgnoreCase("GROUP") &&
!_input.LT(1).getText().equalsIgnoreCase("ORDER") &&
!_input.LT(1).getText().equalsIgnoreCase("LIMIT") &&
@@ -856,6 +889,10 @@ group_by_clause :
having_clause :
T_HAVING bool_expr
;
+
+qualify_clause :
+ T_QUALIFY bool_expr
+ ;
order_by_clause :
T_ORDER T_BY expr (T_ASC | T_DESC)? (T_COMMA expr (T_ASC | T_DESC)?)*
@@ -879,7 +916,7 @@ update_assignment :
;
update_table :
- (table_name | (T_OPEN_P select_stmt T_CLOSE_P)) (T_AS? ident)?
+ (table_name from_clause? | T_OPEN_P select_stmt T_CLOSE_P) (T_AS? ident)?
;
update_upsert :
@@ -905,9 +942,14 @@ merge_action :
| T_DELETE
;
-delete_stmt : // DELETE statement
- T_DELETE T_FROM? table_name (T_AS? ident)? where_clause?
+delete_stmt :
+ T_DELETE T_FROM? table_name delete_alias? (where_clause | T_ALL)?
;
+
+delete_alias :
+ {!_input.LT(1).getText().equalsIgnoreCase("ALL")}?
+ T_AS? ident
+ ;
describe_stmt :
(T_DESCRIBE | T_DESC) T_TABLE? table_name
@@ -928,6 +970,7 @@ bool_expr_atom :
bool_expr_unary :
expr T_IS T_NOT? T_NULL
| expr T_BETWEEN expr T_AND expr
+ | T_NOT? T_EXISTS T_OPEN_P select_stmt T_CLOSE_P
| bool_expr_single_in
| bool_expr_multi_in
;
@@ -967,6 +1010,7 @@ expr :
| expr T_DIV expr
| expr T_ADD expr
| expr T_SUB expr
+ | T_OPEN_P select_stmt T_CLOSE_P
| T_OPEN_P expr T_CLOSE_P
| expr_interval
| expr_concat
@@ -997,6 +1041,8 @@ interval_item :
| T_DAYS
| T_MICROSECOND
| T_MICROSECONDS
+ | T_SECOND
+ | T_SECONDS
;
expr_concat : // String concatenation operator
@@ -1141,8 +1187,7 @@ timestamp_literal : // TIMESTAMP 'YYYY-MM-DD HH:MI:SS.FFF'
;
ident :
- L_ID
- | non_reserved_words
+ (L_ID | non_reserved_words) ('.' (L_ID | non_reserved_words))*
;
string : // String literal (single or double quoted)
@@ -1207,7 +1252,9 @@ non_reserved_words : // Tokens that are not reserved words
| T_CLOSE
| T_CLUSTERED
| T_CMP
+ | T_COLLECT
| T_COLLECTION
+ | T_COLUMN
| T_COMMENT
| T_CONSTANT
| T_COPY
@@ -1229,6 +1276,7 @@ non_reserved_words : // Tokens that are not reserved words
| T_CURRENT_TIMESTAMP
| T_CURRENT_USER
| T_CURSOR
+ | T_DATA
| T_DATABASE
| T_DATE
| T_DATETIME
@@ -1270,12 +1318,13 @@ non_reserved_words : // Tokens that are not reserved words
| T_EXCEPTION
| T_EXCLUSIVE
| T_EXISTS
- | T_EXIT
+ | T_EXIT
+ | T_FALLBACK
| T_FALSE
| T_FETCH
| T_FIELDS
| T_FILE
- | T_FILES
+ | T_FILES
| T_FIRST_VALUE
| T_FLOAT
| T_FOR
@@ -1390,7 +1439,9 @@ non_reserved_words : // Tokens that are not reserved words
| T_PROC
| T_PROCEDURE
| T_PWD
+ | T_QUALIFY
| T_QUERY_BAND
+ | T_QUIT
| T_QUOTED_IDENTIFIER
| T_RAISE
| T_RANK
@@ -1416,6 +1467,8 @@ non_reserved_words : // Tokens that are not reserved words
| T_ROW_COUNT
| T_ROW_NUMBER
| T_SCHEMA
+ | T_SECOND
+ | T_SECONDS
| T_SECURITY
| T_SEGMENT
| T_SEL
@@ -1434,7 +1487,9 @@ non_reserved_words : // Tokens that are not reserved words
| T_SQLEXCEPTION
| T_SQLINSERT
| T_SQLSTATE
- | T_SQLWARNING
+ | T_SQLWARNING
+ | T_STATS
+ | T_STATISTICS
| T_STEP
| T_STDEV
| T_STORAGE
@@ -1523,7 +1578,9 @@ T_CLIENT : C L I E N T ;
T_CLOSE : C L O S E ;
T_CLUSTERED : C L U S T E R E D;
T_CMP : C M P ;
+T_COLLECT : C O L L E C T ;
T_COLLECTION : C O L L E C T I O N ;
+T_COLUMN : C O L U M N ;
T_COMMENT : C O M M E N T;
T_CONSTANT : C O N S T A N T ;
T_COMMIT : C O M M I T ;
@@ -1541,7 +1598,8 @@ T_CS : C S;
T_CURRENT : C U R R E N T ;
T_CURRENT_SCHEMA : C U R R E N T '_' S C H E M A ;
T_CURSOR : C U R S O R ;
-T_DATABASE : D A T A B A S E;
+T_DATABASE : D A T A B A S E ;
+T_DATA : D A T A ;
T_DATE : D A T E ;
T_DATETIME : D A T E T I M E ;
T_DAY : D A Y ;
@@ -1582,6 +1640,7 @@ T_EXCEPTION : E X C E P T I O N ;
T_EXCLUSIVE : E X C L U S I V E ;
T_EXISTS : E X I S T S ;
T_EXIT : E X I T ;
+T_FALLBACK : F A L L B A C K ;
T_FALSE : F A L S E ;
T_FETCH : F E T C H ;
T_FIELDS : F I E L D S ;
@@ -1694,8 +1753,10 @@ T_PRESERVE : P R E S E R V E ;
T_PRIMARY : P R I M A R Y ;
T_PRINT : P R I N T ;
T_PROC : P R O C ;
-T_PROCEDURE : P R O C E D U R E;
+T_PROCEDURE : P R O C E D U R E ;
+T_QUALIFY : Q U A L I F Y ;
T_QUERY_BAND : Q U E R Y '_' B A N D ;
+T_QUIT : Q U I T ;
T_QUOTED_IDENTIFIER : Q U O T E D '_' I D E N T I F I E R ;
T_RAISE : R A I S E ;
T_REAL : R E A L ;
@@ -1722,6 +1783,8 @@ T_RS : R S ;
T_PWD : P W D ;
T_TRIM : T R I M ;
T_SCHEMA : S C H E M A ;
+T_SECOND : S E C O N D ;
+T_SECONDS : S E C O N D S;
T_SECURITY : S E C U R I T Y ;
T_SEGMENT : S E G M E N T ;
T_SEL : S E L ;
@@ -1742,6 +1805,8 @@ T_SQLEXCEPTION : S Q L E X C E P T I O N ;
T_SQLINSERT : S Q L I N S E R T ;
T_SQLSTATE : S Q L S T A T E ;
T_SQLWARNING : S Q L W A R N I N G ;
+T_STATS : S T A T S ;
+T_STATISTICS : S T A T I S T I C S ;
T_STEP : S T E P ;
T_STORAGE : S T O R A G E ;
T_STRING : S T R I N G ;
@@ -1836,7 +1901,7 @@ T_CLOSE_SB : ']' ;
T_SEMICOLON : ';' ;
T_SUB : '-' ;
-L_ID : L_ID_PART (L_BLANK* '.' L_BLANK* L_ID_PART)* // Identifier
+L_ID : L_ID_PART // Identifier
;
L_S_STRING : '\'' (('\'' '\'') | ('\\' '\'') | ~('\''))* '\'' // Single quoted string literal
;
@@ -1859,6 +1924,7 @@ L_LABEL : ([a-zA-Z] | L_DIGIT | '_')* ':'
fragment
L_ID_PART :
[a-zA-Z] ([a-zA-Z] | L_DIGIT | '_')* // Identifier part
+ | '$' '{' .*? '}'
| ('_' | '@' | ':' | '#' | '$') ([a-zA-Z] | L_DIGIT | '_' | '@' | ':' | '#' | '$')+ // (at least one char must follow special char)
| '"' .*? '"' // Quoted identifiers
| '[' .*? ']'
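
Taken together, the grammar changes above admit BTEQ-style control statements and Teradata statistics collection. A minimal sketch of input the updated rules accept (the table and column names are illustrative, not from the patch):

  -- if_bteq_stmt / quit_stmt: conditional exit keyed on ERRORCODE
  .if errorcode = 0 then .quit errorcode

  -- collect_stats_stmt with the optional COLUMN list
  collect statistics on sales column (store_id, sale_dt);
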
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
index 02605a8..67cf2ae 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
@@ -40,6 +40,7 @@ import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.NotNull;
import org.antlr.v4.runtime.tree.ParseTree;
+import org.antlr.v4.runtime.tree.TerminalNode;
import org.apache.commons.io.FileUtils;
import org.apache.hive.hplsql.Var.Type;
import org.apache.hive.hplsql.functions.*;
@@ -50,7 +51,8 @@ import org.apache.hive.hplsql.functions.*;
*/
public class Exec extends HplsqlBaseVisitor<Integer> {
- public static final String VERSION = "HPL/SQL 0.3.17";
+ public static final String VERSION = "HPL/SQL 0.3.31";
+ public static final String ERRORCODE = "ERRORCODE";
public static final String SQLCODE = "SQLCODE";
public static final String SQLSTATE = "SQLSTATE";
public static final String HOSTCODE = "HOSTCODE";
@@ -665,9 +667,14 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
* Set SQLCODE
*/
public void setSqlCode(int sqlcode) {
+ Long code = new Long(sqlcode);
Var var = findVariable(SQLCODE);
if (var != null) {
- var.setValue(new Long(sqlcode));
+ var.setValue(code);
+ }
+ var = findVariable(ERRORCODE);
+ if (var != null) {
+ var.setValue(code);
}
}
@@ -783,6 +790,7 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
new FunctionMisc(this).register(function);
new FunctionString(this).register(function);
new FunctionOra(this).register(function);
+ addVariable(new Var(ERRORCODE, Var.Type.BIGINT, 0L));
addVariable(new Var(SQLCODE, Var.Type.BIGINT, 0L));
addVariable(new Var(SQLSTATE, Var.Type.STRING, "00000"));
addVariable(new Var(HOSTCODE, Var.Type.BIGINT, 0L));
@@ -942,9 +950,10 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
*/
Integer getProgramReturnCode() {
Integer rc = 0;
- if(!signals.empty()) {
+ if (!signals.empty()) {
Signal sig = signals.pop();
- if(sig.type == Signal.Type.LEAVE_ROUTINE && sig.value != null) {
+ if ((sig.type == Signal.Type.LEAVE_PROGRAM || sig.type == Signal.Type.LEAVE_ROUTINE) &&
+ sig.value != null) {
try {
rc = Integer.parseInt(sig.value);
}
@@ -1133,7 +1142,7 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
String scale = null;
Var default_ = null;
if (ctx.dtype().T_ROWTYPE() != null) {
- row = meta.getRowDataType(ctx, exec.conf.defaultConnection, ctx.dtype().L_ID().getText());
+ row = meta.getRowDataType(ctx, exec.conf.defaultConnection, ctx.dtype().ident().getText());
if (row == null) {
type = Var.DERIVED_ROWTYPE;
}
@@ -1184,7 +1193,7 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
String getDataType(HplsqlParser.Declare_var_itemContext ctx) {
String type = null;
if (ctx.dtype().T_TYPE() != null) {
- type = meta.getDataType(ctx, exec.conf.defaultConnection, ctx.dtype().L_ID().getText());
+ type = meta.getDataType(ctx, exec.conf.defaultConnection, ctx.dtype().ident().getText());
if (type == null) {
type = Var.DERIVED_TYPE;
}
@@ -1349,6 +1358,11 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
}
@Override
+ public Integer visitCreate_table_options_td_item(HplsqlParser.Create_table_options_td_itemContext ctx) {
+ return 0;
+ }
+
+ @Override
public Integer visitCreate_table_options_mssql_item(HplsqlParser.Create_table_options_mssql_itemContext ctx) {
return 0;
}
@@ -1678,6 +1692,14 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
}
/**
+ * IF statement (BTEQ syntax)
+ */
+ @Override
+ public Integer visitIf_bteq_stmt(HplsqlParser.If_bteq_stmtContext ctx) {
+ return exec.stmt.ifBteq(ctx);
+ }
+
+ /**
* USE statement
*/
@Override
@@ -1786,6 +1808,14 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
return exec.stmt.print(ctx);
}
+ /**
+ * QUIT statement
+ */
+ @Override
+ public Integer visitQuit_stmt(HplsqlParser.Quit_stmtContext ctx) {
+ return exec.stmt.quit(ctx);
+ }
+
/**
* SIGNAL statement
*/
@@ -2290,6 +2320,31 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
}
/**
+ * Append the text preserving the formatting (space symbols) between tokens
+ */
+ void append(StringBuilder str, String appendStr, Token start, Token stop) {
+ String spaces = start.getInputStream().getText(new org.antlr.v4.runtime.misc.Interval(start.getStartIndex(), stop.getStopIndex()));
+ spaces = spaces.substring(start.getText().length(), spaces.length() - stop.getText().length());
+ str.append(spaces);
+ str.append(appendStr);
+ }
+
+ void append(StringBuilder str, TerminalNode start, TerminalNode stop) {
+ String text = start.getSymbol().getInputStream().getText(new org.antlr.v4.runtime.misc.Interval(start.getSymbol().getStartIndex(), stop.getSymbol().getStopIndex()));
+ str.append(text);
+ }
+
+ /**
+ * Get the first non-null node
+ */
+ TerminalNode nvl(TerminalNode t1, TerminalNode t2) {
+ if (t1 != null) {
+ return t1;
+ }
+ return t2;
+ }
+
+ /**
* Evaluate the expression and pop value from the stack
*/
Var evalPop(ParserRuleContext ctx) {
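
On the runtime side, Exec now predefines ERRORCODE next to SQLCODE, mirrors every return code into both variables in setSqlCode(), and lets getProgramReturnCode() read the value carried by a LEAVE_PROGRAM signal. A hedged sketch of the resulting behavior (the table name is hypothetical):

  -- ERRORCODE is updated after each SQL statement, like SQLCODE
  select count(*) from src_tab;

  -- .quit raises LEAVE_PROGRAM; its expression becomes the process exit code
  .if errorcode <> 0 then .quit errorcode
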
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java b/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
index 33ef490..c10f702 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Expression.java
@@ -74,8 +74,14 @@ public class Expression {
StringBuilder sql = new StringBuilder();
if (ctx.T_OPEN_P() != null) {
sql.append("(");
- sql.append(evalPop(ctx.expr(0)).toString());
- sql.append(")");
+ if (ctx.select_stmt() != null) {
+ exec.append(sql, evalPop(ctx.select_stmt()).toString(), ctx.T_OPEN_P().getSymbol(), ctx.select_stmt().getStart());
+ exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
+ }
+ else {
+ sql.append(evalPop(ctx.expr(0)).toString());
+ sql.append(")");
+ }
}
else if (ctx.T_MUL() != null) {
sql.append(evalPop(ctx.expr(0)).toString());
@@ -232,6 +238,11 @@ public class Expression {
sql.append(" " + ctx.T_AND().getText() + " ");
sql.append(evalPop(ctx.expr(2)).toString());
}
+ else if (ctx.T_EXISTS() != null) {
+ exec.append(sql, exec.nvl(ctx.T_NOT(), ctx.T_EXISTS()), ctx.T_OPEN_P());
+ exec.append(sql, evalPop(ctx.select_stmt()).toString(), ctx.T_OPEN_P().getSymbol(), ctx.select_stmt().getStart());
+ exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
+ }
else if (ctx.bool_expr_single_in() != null) {
singleInClauseSql(ctx.bool_expr_single_in(), sql);
}
@@ -245,14 +256,12 @@ public class Expression {
/**
* Single value IN clause in executable SQL statement
*/
- public void singleInClauseSql(HplsqlParser.Bool_expr_single_inContext ctx, StringBuilder sql) {
- sql.append(evalPop(ctx.expr(0)).toString());
- if (ctx.T_NOT() != null) {
- sql.append(" " + ctx.T_NOT().getText());
- }
- sql.append(" " + ctx.T_IN().getText() + " (");
+ public void singleInClauseSql(HplsqlParser.Bool_expr_single_inContext ctx, StringBuilder sql) {
+ sql.append(evalPop(ctx.expr(0)).toString() + " ");
+ exec.append(sql, exec.nvl(ctx.T_NOT(), ctx.T_IN()), ctx.T_OPEN_P());
if (ctx.select_stmt() != null) {
- sql.append(evalPop(ctx.select_stmt()));
+ exec.append(sql, evalPop(ctx.select_stmt()).toString(), ctx.T_OPEN_P().getSymbol(), ctx.select_stmt().getStart());
+ exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
}
else {
int cnt = ctx.expr().size();
@@ -262,8 +271,8 @@ public class Expression {
sql.append(", ");
}
}
- }
- sql.append(")");
+ sql.append(")");
+ }
}
/**
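
With the new T_EXISTS branch and the append() helpers, a NOT EXISTS predicate is forwarded with the subquery's original whitespace instead of being re-serialized token by token. For example, a statement such as the following (hypothetical tables; compare the select.sql test below) reaches the database laid out exactly as written:

  select * from a where not exists (select * from b);
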
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Select.java b/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
index 4bee252..589e984 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Select.java
@@ -25,6 +25,7 @@ import java.util.List;
import java.util.Stack;
import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.Interval;
public class Select {
@@ -196,26 +197,38 @@ public class Select {
public Integer subselect(HplsqlParser.Subselect_stmtContext ctx) {
StringBuilder sql = new StringBuilder();
- if (ctx.T_SELECT() != null) {
- sql.append(ctx.T_SELECT().getText());
+ sql.append(ctx.start.getText());
+ exec.append(sql, evalPop(ctx.select_list()).toString(), ctx.start, ctx.select_list().getStart());
+ Token last = ctx.select_list().stop;
+ if (ctx.into_clause() != null) {
+ last = ctx.into_clause().stop;
}
- sql.append(" " + evalPop(ctx.select_list()));
if (ctx.from_clause() != null) {
- sql.append(" " + evalPop(ctx.from_clause()));
- } else if (conf.dualTable != null) {
+ exec.append(sql, evalPop(ctx.from_clause()).toString(), last, ctx.from_clause().getStart());
+ last = ctx.from_clause().stop;
+ }
+ else if (conf.dualTable != null) {
sql.append(" FROM " + conf.dualTable);
}
if (ctx.where_clause() != null) {
- sql.append(" " + evalPop(ctx.where_clause()));
+ exec.append(sql, evalPop(ctx.where_clause()).toString(), last, ctx.where_clause().getStart());
+ last = ctx.where_clause().stop;
}
if (ctx.group_by_clause() != null) {
- sql.append(" " + getText(ctx.group_by_clause()));
+ exec.append(sql, getText(ctx.group_by_clause()), last, ctx.group_by_clause().getStart());
+ last = ctx.group_by_clause().stop;
}
if (ctx.having_clause() != null) {
- sql.append(" " + getText(ctx.having_clause()));
+ exec.append(sql, getText(ctx.having_clause()), last, ctx.having_clause().getStart());
+ last = ctx.having_clause().stop;
+ }
+ if (ctx.qualify_clause() != null) {
+ exec.append(sql, getText(ctx.qualify_clause()), last, ctx.qualify_clause().getStart());
+ last = ctx.qualify_clause().stop;
}
if (ctx.order_by_clause() != null) {
- sql.append(" " + getText(ctx.order_by_clause()));
+ exec.append(sql, getText(ctx.order_by_clause()), last, ctx.order_by_clause().getStart());
+ last = ctx.order_by_clause().stop;
}
if (ctx.select_options() != null) {
Var opt = evalPop(ctx.select_options());
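
subselect() now renders the Teradata QUALIFY clause, which the grammar makes mutually exclusive with HAVING, between GROUP BY and ORDER BY while preserving inter-clause spacing. An illustrative query in the accepted form (names mirror the select_teradata.sql test below):

  select branch_no, c_no, cd_type
  from employee
  qualify row_number() over (partition by c_no order by cd_type) = 1;
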
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java b/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
index 2c8cfc1..ddefcd8 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Signal.java
@@ -22,7 +22,7 @@ package org.apache.hive.hplsql;
* Signals and exceptions
*/
public class Signal {
- public enum Type { LEAVE_LOOP, LEAVE_ROUTINE, SQLEXCEPTION, NOTFOUND, UNSUPPORTED_OPERATION, USERDEFINED };
+ public enum Type { LEAVE_LOOP, LEAVE_ROUTINE, LEAVE_PROGRAM, SQLEXCEPTION, NOTFOUND, UNSUPPORTED_OPERATION, USERDEFINED };
Type type;
String value = "";
Exception exception = null;
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
index d35f994..17d2195 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java
@@ -25,9 +25,8 @@ import java.util.Stack;
import java.util.UUID;
import org.antlr.v4.runtime.ParserRuleContext;
+import org.antlr.v4.runtime.Token;
import org.apache.hive.hplsql.Var.Type;
-import org.apache.hive.hplsql.HplsqlParser.Create_table_columns_itemContext;
-import org.apache.hive.hplsql.HplsqlParser.Create_table_columnsContext;
/**
* HPL/SQL statements execution
@@ -130,30 +129,13 @@ public class Stmt {
public Integer createTable(HplsqlParser.Create_table_stmtContext ctx) {
trace(ctx, "CREATE TABLE");
StringBuilder sql = new StringBuilder();
- sql.append(exec.getText(ctx, ctx.T_CREATE().getSymbol(), ctx.T_TABLE().getSymbol()));
- sql.append(" " + evalPop(ctx.table_name()) + " (");
- int cnt = ctx.create_table_columns().create_table_columns_item().size();
- int cols = 0;
- for (int i = 0; i < cnt; i++) {
- Create_table_columns_itemContext col = ctx.create_table_columns().create_table_columns_item(i);
- if (col.create_table_column_cons() != null) {
- continue;
- }
- if (cols > 0) {
- sql.append(",\n");
- }
- sql.append(evalPop(col.column_name()));
- sql.append(" ");
- sql.append(exec.evalPop(col.dtype(), col.dtype_len()));
- cols++;
- }
- sql.append("\n)");
- if (ctx.create_table_options() != null) {
- String opt = evalPop(ctx.create_table_options()).toString();
- if (opt != null) {
- sql.append(" " + opt);
- }
+ exec.append(sql, ctx.T_CREATE(), ctx.T_TABLE());
+ exec.append(sql, evalPop(ctx.table_name()).toString(), ctx.T_TABLE().getSymbol(), ctx.table_name().getStart());
+ Token last = ctx.table_name().getStop();
+ if (ctx.create_table_preoptions() != null) {
+ last = ctx.create_table_preoptions().stop;
}
+ sql.append(createTableDefinition(ctx.create_table_definition(), last));
trace(ctx, sql.toString());
Query query = exec.executeSql(ctx, sql.toString(), exec.conf.defaultConnection);
if (query.error()) {
@@ -166,6 +148,40 @@ public class Stmt {
}
/**
+ * Get CREATE TABLE definition (columns or query)
+ */
+ String createTableDefinition(HplsqlParser.Create_table_definitionContext ctx, Token last) {
+ StringBuilder sql = new StringBuilder();
+ HplsqlParser.Create_table_columnsContext colCtx = ctx.create_table_columns();
+ if (colCtx != null) {
+ int cnt = colCtx.create_table_columns_item().size();
+ for (int i = 0; i < cnt; i++) {
+ HplsqlParser.Create_table_columns_itemContext col = colCtx.create_table_columns_item(i);
+ if (col.create_table_column_cons() != null) {
+ last = col.getStop();
+ continue;
+ }
+ exec.append(sql, evalPop(col.column_name()).toString(), last, col.column_name().getStop());
+ exec.append(sql, exec.evalPop(col.dtype(), col.dtype_len()), col.column_name().getStop(), col.dtype().getStart());
+ last = col.getStop();
+ }
+ exec.append(sql, ctx.T_CLOSE_P().getText(), last, ctx.T_CLOSE_P().getSymbol());
+ }
+ else {
+ exec.append(sql, evalPop(ctx.select_stmt()).toString(), last, ctx.select_stmt().getStart());
+ exec.append(sql, ctx.T_CLOSE_P().getText(), ctx.select_stmt().stop, ctx.T_CLOSE_P().getSymbol());
+ }
+ HplsqlParser.Create_table_optionsContext options = ctx.create_table_options();
+ if (options != null) {
+ String opt = evalPop(options).toString();
+ if (opt != null) {
+ sql.append(" " + opt);
+ }
+ }
+ return sql.toString();
+ }
+
+ /**
* CREATE TABLE options for Hive
*/
public Integer createTableHiveOptions(HplsqlParser.Create_table_options_hive_itemContext ctx) {
@@ -196,7 +212,6 @@ public class Stmt {
* CREATE TABLE options for MySQL
*/
public Integer createTableMysqlOptions(HplsqlParser.Create_table_options_mysql_itemContext ctx) {
- StringBuilder sql = new StringBuilder();
if (ctx.T_COMMENT() != null) {
evalString(ctx.T_COMMENT().getText() + " " + evalPop(ctx.expr()).toSqlString());
}
@@ -207,11 +222,8 @@ public class Stmt {
* DECLARE TEMPORARY TABLE statement
*/
public Integer declareTemporaryTable(HplsqlParser.Declare_temporary_table_itemContext ctx) {
- String name = ctx.ident().getText();
- if (trace) {
- trace(ctx, "DECLARE TEMPORARY TABLE " + name);
- }
- return createTemporaryTable(ctx, ctx.create_table_columns(), name);
+ trace(ctx, "DECLARE TEMPORARY TABLE");
+ return createTemporaryTable(ctx.ident(), ctx.create_table_definition(), ctx.create_table_preoptions());
}
/**
@@ -256,37 +268,45 @@ public class Stmt {
* CREATE LOCAL TEMPORARY | VOLATILE TABLE statement
*/
public Integer createLocalTemporaryTable(HplsqlParser.Create_local_temp_table_stmtContext ctx) {
- String name = ctx.ident().getText();
- if (trace) {
- trace(ctx, "CREATE LOCAL TEMPORARY TABLE " + name);
- }
- return createTemporaryTable(ctx, ctx.create_table_columns(), name);
+ trace(ctx, "CREATE LOCAL TEMPORARY TABLE");
+ return createTemporaryTable(ctx.ident(), ctx.create_table_definition(), ctx.create_table_preoptions());
}
/**
* Create a temporary table statement
*/
- public Integer createTemporaryTable(ParserRuleContext ctx, Create_table_columnsContext colCtx, String name) {
+ public Integer createTemporaryTable(HplsqlParser.IdentContext identCtx, HplsqlParser.Create_table_definitionContext defCtx,
+ HplsqlParser.Create_table_preoptionsContext optCtx) {
+ StringBuilder sql = new StringBuilder();
+ String name = identCtx.getText();
String managedName = null;
- String sql = null;
- String columns = exec.getFormattedText(colCtx);
+ Token last = identCtx.getStop();
+ if (optCtx != null) {
+ last = optCtx.stop;
+ }
if (conf.tempTables == Conf.TempTables.NATIVE) {
- sql = "CREATE TEMPORARY TABLE " + name + "\n(" + columns + "\n)";
- } else if (conf.tempTables == Conf.TempTables.MANAGED) {
+ sql.append("CREATE TEMPORARY TABLE " + name);
+ sql.append(createTableDefinition(defCtx, last));
+ }
+ else if (conf.tempTables == Conf.TempTables.MANAGED) {
managedName = name + "_" + UUID.randomUUID().toString().replace("-","");
if (!conf.tempTablesSchema.isEmpty()) {
managedName = conf.tempTablesSchema + "." + managedName;
}
- sql = "CREATE TABLE " + managedName + "\n(" + columns + "\n)";
+ sql.append("CREATE TABLE " + managedName);
+ sql.append(createTableDefinition(defCtx, last));
if (!conf.tempTablesLocation.isEmpty()) {
- sql += "\nLOCATION '" + conf.tempTablesLocation + "/" + managedName + "'";
+ sql.append("\nLOCATION '" + conf.tempTablesLocation + "/" + managedName + "'");
}
if (trace) {
- trace(ctx, "Managed table name: " + managedName);
+ trace(null, "Managed table name: " + managedName);
}
}
+ if (trace) {
+ trace(null, sql.toString());
+ }
if (sql != null) {
- Query query = exec.executeSql(ctx, sql, exec.conf.defaultConnection);
+ Query query = exec.executeSql(null, sql.toString(), exec.conf.defaultConnection);
if (query.error()) {
exec.signal(query);
return 1;
@@ -606,6 +626,19 @@ public class Stmt {
}
/**
+ * IF statement (BTEQ syntax)
+ */
+ public Integer ifBteq(HplsqlParser.If_bteq_stmtContext ctx) {
+ trace(ctx, "IF");
+ visit(ctx.bool_expr());
+ if (exec.stackPop().isTrue()) {
+ trace(ctx, "IF TRUE executed");
+ visit(ctx.single_block_stmt());
+ }
+ return 0;
+ }
+
+ /**
* Assignment from SELECT statement
*/
public Integer assignFromSelect(HplsqlParser.Assignment_stmt_select_itemContext ctx) {
@@ -1103,13 +1136,17 @@ public class Stmt {
trace(ctx, "DELETE");
String table = evalPop(ctx.table_name()).toString();
StringBuilder sql = new StringBuilder();
- sql.append("DELETE FROM ");
- sql.append(table);
- if (ctx.where_clause() != null) {
- boolean oldBuildSql = exec.buildSql;
- exec.buildSql = true;
- sql.append(" " + evalPop(ctx.where_clause()).toString());
- exec.buildSql = oldBuildSql;
+ if (ctx.T_ALL() == null) {
+ sql.append("DELETE FROM " + table);
+ if (ctx.where_clause() != null) {
+ boolean oldBuildSql = exec.buildSql;
+ exec.buildSql = true;
+ sql.append(" " + evalPop(ctx.where_clause()).toString());
+ exec.buildSql = oldBuildSql;
+ }
+ }
+ else {
+ sql.append("TRUNCATE TABLE " + table);
}
trace(ctx, sql.toString());
Query query = exec.executeSql(ctx, sql.toString(), exec.conf.defaultConnection);
@@ -1150,6 +1187,19 @@ public class Stmt {
return 0;
}
+ /**
+ * QUIT Statement
+ */
+ public Integer quit(HplsqlParser.Quit_stmtContext ctx) {
+ trace(ctx, "QUIT");
+ String rc = null;
+ if (ctx.expr() != null) {
+ rc = evalPop(ctx.expr()).toString();
+ }
+ exec.signal(Signal.Type.LEAVE_PROGRAM, rc);
+ return 0;
+ }
+
/**
* SET current schema
*/
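
The reworked delete() maps Teradata's full-table DELETE onto Hive's TRUNCATE. A sketch of the rewrite, taken from the delete_all test below:

  -- Source statement (Teradata syntax)
  DELETE FROM TEST1_DB.WK_WRK ALL;

  -- Statement generated for Hive
  TRUNCATE TABLE TEST1_DB.WK_WRK
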
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/main/resources/hplsql-site.xml
----------------------------------------------------------------------
diff --git a/hplsql/src/main/resources/hplsql-site.xml b/hplsql/src/main/resources/hplsql-site.xml
index 7e2d92d..05fe857 100644
--- a/hplsql/src/main/resources/hplsql-site.xml
+++ b/hplsql/src/main/resources/hplsql-site.xml
@@ -12,7 +12,6 @@
<property>
<name>hplsql.conn.init.hiveconn</name>
<value>
- set mapred.job.queue.name=default;
set hive.execution.engine=mr;
use default;
</value>
@@ -36,7 +35,6 @@
<property>
<name>hplsql.conn.init.hive2conn</name>
<value>
- set mapred.job.queue.name=default;
set hive.execution.engine=mr;
use default;
</value>
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
----------------------------------------------------------------------
diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
index 80915ea..9b5a956 100644
--- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
+++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
@@ -222,6 +222,11 @@ public class TestHplsqlLocal {
public void testIf2() throws Exception {
run("if2");
}
+
+ @Test
+ public void testIf3Bteq() throws Exception {
+ run("if3_bteq");
+ }
@Test
public void testInclude() throws Exception {
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
----------------------------------------------------------------------
diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
index 59b7bff..3e897be 100644
--- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
+++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlOffline.java
@@ -64,16 +64,36 @@ public class TestHplsqlOffline {
}
@Test
+ public void testCreateTableTd() throws Exception {
+ run("create_table_td");
+ }
+
+ @Test
+ public void testDeleteAll() throws Exception {
+ run("delete_all");
+ }
+
+ @Test
public void testInsertMysql() throws Exception {
run("insert_mysql");
}
@Test
+ public void testSelect() throws Exception {
+ run("select");
+ }
+
+ @Test
public void testSelectDb2() throws Exception {
run("select_db2");
}
@Test
+ public void testSelectTeradata() throws Exception {
+ run("select_teradata");
+ }
+
+ @Test
public void testUpdate() throws Exception {
run("update");
}
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/local/if3_bteq.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/local/if3_bteq.sql b/hplsql/src/test/queries/local/if3_bteq.sql
new file mode 100644
index 0000000..12a39a9
--- /dev/null
+++ b/hplsql/src/test/queries/local/if3_bteq.sql
@@ -0,0 +1,3 @@
+.if errorcode = 0 then .quit errorcode
+
+print 'Failed: must not be executed';
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/create_table_td.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/offline/create_table_td.sql b/hplsql/src/test/queries/offline/create_table_td.sql
new file mode 100644
index 0000000..2572bb6
--- /dev/null
+++ b/hplsql/src/test/queries/offline/create_table_td.sql
@@ -0,0 +1,45 @@
+CREATE TABLE tab, NO LOG, NO FALLBACK
+ (
+ SOURCE_ID INT,
+ RUN_ID INT,
+ STATUS CHAR,
+ LOAD_START timestamp(0),
+ LOAD_END timestamp(0)
+ );
+
+CREATE TABLE ctl, NO LOG, NO FALLBACK
+AS
+(
+ SELECT
+ EBC.SOURCE_ID,
+ MAX(EBC.RUN_ID) AS RUN_ID,
+ EBC.STATUS,
+ EBC.LOAD_START,
+ EBC.LOAD_END
+ FROM
+ EBC
+ WHERE
+ EBC.SOURCE_ID = 451 AND
+ EBC.STATUS = 'R'
+ GROUP BY
+ 1,3,4,5
+);
+
+CREATE SET VOLATILE TABLE ctl2, NO LOG, NO FALLBACK
+AS
+(
+ SELECT
+ EBC.SOURCE_ID,
+ MAX(EBC.RUN_ID) AS RUN_ID,
+ EBC.STATUS,
+ EBC.LOAD_START,
+ EBC.LOAD_END
+ FROM
+ EBC
+ WHERE
+ EBC.SOURCE_ID = 451 AND
+ EBC.STATUS = 'R'
+ GROUP BY
+ 1,3,4,5
+) WITH DATA PRIMARY INDEX (LOAD_START,LOAD_END)
+ ON COMMIT PRESERVE ROWS ;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/delete_all.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/offline/delete_all.sql b/hplsql/src/test/queries/offline/delete_all.sql
new file mode 100644
index 0000000..e89fd48
--- /dev/null
+++ b/hplsql/src/test/queries/offline/delete_all.sql
@@ -0,0 +1 @@
+DELETE FROM TEST1_DB.WK_WRK ALL;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/select.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/offline/select.sql b/hplsql/src/test/queries/offline/select.sql
new file mode 100644
index 0000000..0b6912e
--- /dev/null
+++ b/hplsql/src/test/queries/offline/select.sql
@@ -0,0 +1,42 @@
+SELECT * FROM a where 1=1 and not exists (select * from b)--abc;
+
+SELECT *
+ FROM a
+ where not exists
+ (
+ select * from b
+ );
+
+SELECT
+ *
+ FROM
+ tab
+ WHERE FILE_DATE > (
+ SELECT
+ MAX(FILE_DATE) AS MX_C_FILE_DT
+ FROM tab
+ WHERE FLAG = 'C'
+ AND IND = 'C'
+ AND FILE_DATE <
+ ( SELECT
+ CAST( LOAD_START AS DATE)
+ FROM
+ tab
+ WHERE
+ SOURCE_ID = 451 AND
+ BATCH = 'R'
+ )
+ );
+
+SELECT
+*
+FROM
+ DLTA_POC
+ LEFT OUTER JOIN TEST3_DB.TET ORG
+ ON DLTA_POC.YS_NO = ORG.EM_CODE_A
+ AND DLTA_POC.AREA_NO = ORG.AREA_CODE_2
+ AND DLTA_POC.GNT_POC = ORG.GEN_CD
+
+ LEFT OUTER JOIN TEST.LOCATION LOC
+ ON DLTA_POC.SE_KEY_POC = LOC.LOC_ID
+ AND LOC.LOCATION_END_DT = DATE '9999-12-31' ;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/queries/offline/select_teradata.sql
----------------------------------------------------------------------
diff --git a/hplsql/src/test/queries/offline/select_teradata.sql b/hplsql/src/test/queries/offline/select_teradata.sql
new file mode 100644
index 0000000..69522b8
--- /dev/null
+++ b/hplsql/src/test/queries/offline/select_teradata.sql
@@ -0,0 +1,12 @@
+SELECT branch_code,
+ branch_no,
+ c_no,
+ cd_type
+FROM EMPLOYEE
+ WHERE S_CODE = 'C'
+ AND (branch_no) NOT IN (
+ SELECT branch_code
+ FROM DEPARTMENT
+ WHERE branch_code = 'ABC'
+ )
+QUALIFY ROW_NUMBER() OVER (PARTITION BY c_no ORDER BY cd_type) = 1
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/db/select_into.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/db/select_into.out.txt b/hplsql/src/test/results/db/select_into.out.txt
index 3f4ae31..6e4a69c 100644
--- a/hplsql/src/test/results/db/select_into.out.txt
+++ b/hplsql/src/test/results/db/select_into.out.txt
@@ -6,7 +6,8 @@ Ln:5 DECLARE v_dec DECIMAL
Ln:6 DECLARE v_dec0 DECIMAL
Ln:7 DECLARE v_str STRING
Ln:9 SELECT
-Ln:9 SELECT CAST(1 AS BIGINT), CAST(1 AS INT), CAST(1 AS SMALLINT), CAST(1 AS TINYINT), CAST(1.1 AS DECIMAL(18,2)), CAST(1.1 AS DECIMAL(18,0)) FROM src LIMIT 1
+Ln:9 SELECT CAST(1 AS BIGINT), CAST(1 AS INT), CAST(1 AS SMALLINT), CAST(1 AS TINYINT), CAST(1.1 AS DECIMAL(18,2)), CAST(1.1 AS DECIMAL(18,0))
+FROM src LIMIT 1
Ln:9 SELECT completed successfully
Ln:9 SELECT INTO statement executed
Ln:9 COLUMN: _c0, bigint
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/db/select_into2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/db/select_into2.out.txt b/hplsql/src/test/results/db/select_into2.out.txt
index 03e67ad..582fdfb 100644
--- a/hplsql/src/test/results/db/select_into2.out.txt
+++ b/hplsql/src/test/results/db/select_into2.out.txt
@@ -2,7 +2,9 @@ Ln:1 DECLARE v_float float
Ln:2 DECLARE v_double double
Ln:3 DECLARE v_double2 double precision
Ln:5 SELECT
-Ln:5 select cast(1.1 as float), cast(1.1 as double), cast(1.1 as double) from src LIMIT 1
+Ln:5 select
+ cast(1.1 as float), cast(1.1 as double), cast(1.1 as double)
+from src LIMIT 1
Ln:5 SELECT completed successfully
Ln:5 SELECT INTO statement executed
Ln:5 COLUMN: _c0, float
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/local/if3_bteq.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/if3_bteq.out.txt b/hplsql/src/test/results/local/if3_bteq.out.txt
new file mode 100644
index 0000000..47f3010
--- /dev/null
+++ b/hplsql/src/test/results/local/if3_bteq.out.txt
@@ -0,0 +1,3 @@
+Ln:1 IF
+Ln:1 IF TRUE executed
+Ln:1 QUIT
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/local/lang.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/local/lang.out.txt b/hplsql/src/test/results/local/lang.out.txt
index 0047ec4..b3c460a 100644
--- a/hplsql/src/test/results/local/lang.out.txt
+++ b/hplsql/src/test/results/local/lang.out.txt
@@ -7,19 +7,19 @@
-1.0
Ln:19 DECLARE abc int
Ln:20 DECLARE abc.abc int
-Ln:21 DECLARE abc . abc1 int
+Ln:21 DECLARE abc.abc1 int
Ln:22 DECLARE "abc" int
Ln:23 DECLARE "abc".abc int
Ln:24 DECLARE "abc"."abc" int
-Ln:25 DECLARE "abc" . "abc1" int
+Ln:25 DECLARE "abc"."abc1" int
Ln:26 DECLARE [abc] int
Ln:27 DECLARE [abc].abc int
Ln:28 DECLARE [abc].[abc] int
-Ln:29 DECLARE [abc] . [abc1] int
+Ln:29 DECLARE [abc].[abc1] int
Ln:30 DECLARE `abc` int
Ln:31 DECLARE `abc`.abc int
Ln:32 DECLARE `abc`.`abc` int
-Ln:33 DECLARE `abc` . `abc1` int
+Ln:33 DECLARE `abc`.`abc1` int
Ln:34 DECLARE :new.abc int
Ln:35 DECLARE @abc int
Ln:36 DECLARE _abc int
@@ -31,4 +31,4 @@ Ln:40 DECLARE abc_9 int
2
0
-2
-0
+0
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_mssql.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_mssql.out.txt b/hplsql/src/test/results/offline/create_table_mssql.out.txt
index 43b0aa7..29d03d6 100644
--- a/hplsql/src/test/results/offline/create_table_mssql.out.txt
+++ b/hplsql/src/test/results/offline/create_table_mssql.out.txt
@@ -1,24 +1,27 @@
Ln:1 CREATE TABLE
-Ln:1 CREATE TABLE mssql_t1 (d1 TIMESTAMP,
-nc1 STRING,
-n1 DECIMAL(3,0),
-n2 DECIMAL(3),
-n3 DECIMAL,
-v1 STRING,
-nv1 STRING,
-nv2 STRING
+Ln:1 CREATE TABLE mssql_t1 (
+ d1 TIMESTAMP,
+ nc1 STRING,
+ n1 DECIMAL(3,0),
+ n2 DECIMAL(3),
+ n3 DECIMAL,
+ v1 STRING,
+ nv1 STRING,
+ nv2 STRING
)
Ln:12 CREATE TABLE
-Ln:12 CREATE TABLE `mssql_t2` (`i1` INT,
-`v1` VARCHAR(350),
-`v2` STRING,
-`b1` TINYINT,
-`d1` TIMESTAMP
+Ln:12 CREATE TABLE `mssql_t2`(
+ `i1` INT,
+ `v1` VARCHAR(350),
+ `v2` STRING,
+ `b1` TINYINT,
+ `d1` TIMESTAMP
)
Ln:31 CREATE TABLE
-Ln:31 CREATE TABLE `default`.`mssql_t3` (`v1` VARCHAR(50),
-`s2` SMALLINT,
-`sd1` TIMESTAMP,
-`i1` INT,
-`v2` VARCHAR(100)
+Ln:31 CREATE TABLE `default`.`mssql_t3`(
+ `v1` VARCHAR(50),
+ `s2` SMALLINT,
+ `sd1` TIMESTAMP,
+ `i1` INT,
+ `v2` VARCHAR(100)
)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_mssql2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_mssql2.out.txt b/hplsql/src/test/results/offline/create_table_mssql2.out.txt
index a765c4a..8341411 100644
--- a/hplsql/src/test/results/offline/create_table_mssql2.out.txt
+++ b/hplsql/src/test/results/offline/create_table_mssql2.out.txt
@@ -1,10 +1,11 @@
Ln:1 USE
Ln:1 SQL statement: USE `mic.gr`
Ln:14 CREATE TABLE
-Ln:14 CREATE TABLE `downloads` (`id` int,
-`fileName` char(255),
-`fileType` char(10),
-`downloads` int,
-`fromDate` char(40),
-`untilDate` char(40)
+Ln:14 CREATE TABLE `downloads`(
+ `id` int,
+ `fileName` char(255),
+ `fileType` char(10),
+ `downloads` int,
+ `fromDate` char(40),
+ `untilDate` char(40)
)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_mysql.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_mysql.out.txt b/hplsql/src/test/results/offline/create_table_mysql.out.txt
index b835135..d07796f 100644
--- a/hplsql/src/test/results/offline/create_table_mysql.out.txt
+++ b/hplsql/src/test/results/offline/create_table_mysql.out.txt
@@ -1,4 +1,5 @@
Ln:1 CREATE TABLE
-Ln:1 CREATE TABLE `users` (`id` int,
-`name` STRING
+Ln:1 CREATE TABLE IF NOT EXISTS `users` (
+ `id` int,
+ `name` STRING
) COMMENT 'users table'
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_ora.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_ora.out.txt b/hplsql/src/test/results/offline/create_table_ora.out.txt
index cf30c0f..972e00a 100644
--- a/hplsql/src/test/results/offline/create_table_ora.out.txt
+++ b/hplsql/src/test/results/offline/create_table_ora.out.txt
@@ -1,42 +1,49 @@
Ln:1 CREATE TABLE
-Ln:1 CREATE TABLE ora_t1 (n1 DECIMAL(3,0),
-v1 STRING
+Ln:1 CREATE TABLE ora_t1 (
+ n1 DECIMAL(3,0),
+ v1 STRING
)
Ln:6 CREATE TABLE
-Ln:6 CREATE TABLE `USER`.`EMP` (`EMPNO` DECIMAL(4,0),
-`ENAME` STRING,
-`JOB` STRING,
-`MGR` DECIMAL(4,0),
-`HIREDATE` DATE,
-`SAL` DECIMAL(7,2),
-`COMM` DECIMAL(7,2),
-`DEPTNO` DECIMAL(2,0)
-)
+Ln:6 CREATE TABLE `USER`.`EMP`
+ ( `EMPNO` DECIMAL(4,0),
+ `ENAME` STRING,
+ `JOB` STRING,
+ `MGR` DECIMAL(4,0),
+ `HIREDATE` DATE,
+ `SAL` DECIMAL(7,2),
+ `COMM` DECIMAL(7,2),
+ `DEPTNO` DECIMAL(2,0)
+ )
Ln:21 CREATE TABLE
-Ln:21 CREATE TABLE language (id DECIMAL(7),
-cd CHAR(2),
-description STRING
+Ln:21 CREATE TABLE language (
+ id DECIMAL(7),
+ cd CHAR(2),
+ description STRING
)
Ln:26 CREATE TABLE
-Ln:26 CREATE TABLE author (id DECIMAL(7),
-first_name STRING,
-last_name STRING,
-date_of_birth DATE,
-year_of_birth DECIMAL(7),
-distinguished DECIMAL(1)
+Ln:26 CREATE TABLE author (
+ id DECIMAL(7),
+ first_name STRING,
+ last_name STRING,
+ date_of_birth DATE,
+ year_of_birth DECIMAL(7),
+ distinguished DECIMAL(1)
)
Ln:34 CREATE TABLE
-Ln:34 CREATE TABLE book (id DECIMAL(7),
-author_id DECIMAL(7),
-title STRING,
-published_in DECIMAL(7),
-language_id DECIMAL(7)
+Ln:34 CREATE TABLE book (
+ id DECIMAL(7),
+ author_id DECIMAL(7),
+ title STRING,
+ published_in DECIMAL(7),
+ language_id DECIMAL(7)
)
Ln:43 CREATE TABLE
-Ln:43 CREATE TABLE book_store (name STRING
+Ln:43 CREATE TABLE book_store (
+ name STRING
)
Ln:46 CREATE TABLE
-Ln:46 CREATE TABLE book_to_book_store (name STRING,
-book_id INTEGER,
-stock INTEGER
+Ln:46 CREATE TABLE book_to_book_store (
+ name STRING,
+ book_id INTEGER,
+ stock INTEGER
)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_ora2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_ora2.out.txt b/hplsql/src/test/results/offline/create_table_ora2.out.txt
index 5d4e107..03f54e8 100644
--- a/hplsql/src/test/results/offline/create_table_ora2.out.txt
+++ b/hplsql/src/test/results/offline/create_table_ora2.out.txt
@@ -1,5 +1,6 @@
Ln:1 CREATE TABLE
-Ln:1 CREATE TABLE `default`.`AUDIT_LOGS` (`RUN_ID` STRING,
-`FILE_NAME` STRING,
-`RUN_DATE` DATE
-)
\ No newline at end of file
+Ln:1 CREATE TABLE `default`.`AUDIT_LOGS`
+ ( `RUN_ID` STRING,
+ `FILE_NAME` STRING,
+ `RUN_DATE` DATE
+ )
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_pg.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_pg.out.txt b/hplsql/src/test/results/offline/create_table_pg.out.txt
index cad5488..095eb12 100644
--- a/hplsql/src/test/results/offline/create_table_pg.out.txt
+++ b/hplsql/src/test/results/offline/create_table_pg.out.txt
@@ -1,5 +1,6 @@
Ln:1 CREATE TABLE
-Ln:1 create table i1 (c1 SMALLINT,
-c2 INT,
-c3 BIGINT
+Ln:1 create table i1 (
+ c1 SMALLINT,
+ c2 INT,
+ c3 BIGINT
)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/create_table_td.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/create_table_td.out.txt b/hplsql/src/test/results/offline/create_table_td.out.txt
new file mode 100644
index 0000000..9b9d561
--- /dev/null
+++ b/hplsql/src/test/results/offline/create_table_td.out.txt
@@ -0,0 +1,31 @@
+Ln:1 CREATE TABLE
+Ln:1 CREATE TABLE tab
+ (
+ SOURCE_ID INT,
+ RUN_ID INT,
+ STATUS CHAR,
+ LOAD_START timestamp(0),
+ LOAD_END timestamp(0)
+ )
+Ln:10 CREATE TABLE
+Ln:10 CREATE TABLE ctl
+AS
+(
+ SELECT
+ EBC.SOURCE_ID, MAX(EBC.RUN_ID) AS RUN_ID, EBC.STATUS, EBC.LOAD_START, EBC.LOAD_END
+ FROM EBC
+ WHERE EBC.SOURCE_ID = 451 AND EBC.STATUS = 'R'
+ GROUP BY
+ 1,3,4,5
+)
+Ln:28 CREATE LOCAL TEMPORARY TABLE
+CREATE TEMPORARY TABLE ctl2
+AS
+(
+ SELECT
+ EBC.SOURCE_ID, MAX(EBC.RUN_ID) AS RUN_ID, EBC.STATUS, EBC.LOAD_START, EBC.LOAD_END
+ FROM EBC
+ WHERE EBC.SOURCE_ID = 451 AND EBC.STATUS = 'R'
+ GROUP BY
+ 1,3,4,5
+)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/delete_all.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/delete_all.out.txt b/hplsql/src/test/results/offline/delete_all.out.txt
new file mode 100644
index 0000000..0cecc95
--- /dev/null
+++ b/hplsql/src/test/results/offline/delete_all.out.txt
@@ -0,0 +1,2 @@
+Ln:1 DELETE
+Ln:1 TRUNCATE TABLE TEST1_DB.WK_WRK
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/select.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/select.out.txt b/hplsql/src/test/results/offline/select.out.txt
new file mode 100644
index 0000000..529f0b5
--- /dev/null
+++ b/hplsql/src/test/results/offline/select.out.txt
@@ -0,0 +1,34 @@
+Ln:1 SELECT
+Ln:1 SELECT * FROM a where 1 = 1 and not exists (select * from b)
+Ln:1 Not executed - offline mode set
+Ln:3 SELECT
+Ln:3 SELECT *
+ FROM a
+ where not exists
+ (
+ select * from b
+ )
+Ln:3 Not executed - offline mode set
+Ln:10 SELECT
+Ln:10 SELECT
+ *
+ FROM tab
+ WHERE FILE_DATE > (
+ SELECT
+ MAX(FILE_DATE) AS MX_C_FILE_DT
+ FROM tab
+ WHERE FLAG = 'C' AND IND = 'C' AND FILE_DATE < ( SELECT
+ CAST( LOAD_START AS DATE)
+ FROM tab
+ WHERE SOURCE_ID = 451 AND BATCH = 'R'
+ )
+ )
+Ln:10 Not executed - offline mode set
+Ln:31 SELECT
+Ln:31 SELECT
+*
+FROM DLTA_POC LEFT OUTER JOIN TEST3_DB.TET ORG ON DLTA_POC.YS_NO = ORG.EM_CODE_A
+ AND DLTA_POC.AREA_NO = ORG.AREA_CODE_2
+ AND DLTA_POC.GNT_POC = ORG.GEN_CD LEFT OUTER JOIN TEST.LOCATION LOC ON DLTA_POC.SE_KEY_POC = LOC.LOC_ID
+ AND LOC.LOCATION_END_DT = DATE '9999-12-31'
+Ln:31 Not executed - offline mode set
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/select_db2.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/select_db2.out.txt b/hplsql/src/test/results/offline/select_db2.out.txt
index 1d64e8a..bb5b455 100644
--- a/hplsql/src/test/results/offline/select_db2.out.txt
+++ b/hplsql/src/test/results/offline/select_db2.out.txt
@@ -2,5 +2,6 @@ Ln:1 SELECT
Ln:1 select coalesce(max(info_id) + 1, 0) from sproc_info
Ln:1 Not executed - offline mode set
Ln:3 SELECT
-Ln:3 select cd, cd + inc days, cd - inc days + coalesce(inc, 0) days from (select date '2015-09-02' as cd, 3 as inc from sysibm.sysdummy1)
+Ln:3 select cd, cd + inc days, cd - inc days + coalesce(inc, 0) days
+from (select date '2015-09-02' as cd, 3 as inc from sysibm.sysdummy1)
Ln:3 Not executed - offline mode set
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/2d33d091/hplsql/src/test/results/offline/select_teradata.out.txt
----------------------------------------------------------------------
diff --git a/hplsql/src/test/results/offline/select_teradata.out.txt b/hplsql/src/test/results/offline/select_teradata.out.txt
new file mode 100644
index 0000000..34ab433
--- /dev/null
+++ b/hplsql/src/test/results/offline/select_teradata.out.txt
@@ -0,0 +1,10 @@
+Ln:1 SELECT
+Ln:1 SELECT branch_code, branch_no, c_no, cd_type
+FROM EMPLOYEE
+ WHERE S_CODE = 'C' AND (branch_no) NOT IN (
+ SELECT branch_code
+ FROM DEPARTMENT
+ WHERE branch_code = 'ABC'
+ )
+QUALIFY ROW_NUMBER() OVER (PARTITION BY c_no ORDER BY cd_type) = 1
+Ln:1 Not executed - offline mode set
\ No newline at end of file
[12/20] hive git commit: HIVE-13592 : metastore calls map is not
thread safe (Sergey Shelukhin, reviewed by Aihua Xu)
Posted by jd...@apache.org.
HIVE-13592 : metastore calls map is not thread safe (Sergey Shelukhin, reviewed by Aihua Xu)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f68b5dbb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f68b5dbb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f68b5dbb
Branch: refs/heads/llap
Commit: f68b5dbb59a9e837209e64aefe5aa994476c0bdc
Parents: e68783c
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed May 4 17:05:20 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Wed May 4 17:05:39 2016 -0700
----------------------------------------------------------------------
.../hive/metastore/RetryingMetaStoreClient.java | 17 +++++++++--------
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 3 ++-
2 files changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/f68b5dbb/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index f672adf..3c125e0 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -25,6 +25,7 @@ import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
@@ -55,14 +56,14 @@ public class RetryingMetaStoreClient implements InvocationHandler {
private final IMetaStoreClient base;
private final int retryLimit;
private final long retryDelaySeconds;
- private final Map<String, Long> metaCallTimeMap;
+ private final ConcurrentHashMap<String, Long> metaCallTimeMap;
private final long connectionLifeTimeInMillis;
private long lastConnectionTime;
private boolean localMetaStore;
protected RetryingMetaStoreClient(HiveConf hiveConf, Class<?>[] constructorArgTypes,
- Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
+ Object[] constructorArgs, ConcurrentHashMap<String, Long> metaCallTimeMap,
Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
@@ -94,7 +95,7 @@ public class RetryingMetaStoreClient implements InvocationHandler {
}
public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
- Map<String, Long> metaCallTimeMap, String mscClassName, boolean allowEmbedded)
+ ConcurrentHashMap<String, Long> metaCallTimeMap, String mscClassName, boolean allowEmbedded)
throws MetaException {
return getProxy(hiveConf,
@@ -119,7 +120,7 @@ public class RetryingMetaStoreClient implements InvocationHandler {
* Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose.
*/
public static IMetaStoreClient getProxy(HiveConf hiveConf, Class<?>[] constructorArgTypes,
- Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
+ Object[] constructorArgs, ConcurrentHashMap<String, Long> metaCallTimeMap,
String mscClassName) throws MetaException {
@SuppressWarnings("unchecked")
@@ -202,11 +203,11 @@ public class RetryingMetaStoreClient implements InvocationHandler {
private void addMethodTime(Method method, long timeTaken) {
String methodStr = getMethodString(method);
- Long curTime = metaCallTimeMap.get(methodStr);
- if (curTime != null) {
- timeTaken += curTime;
+ while (true) {
+ Long curTime = metaCallTimeMap.get(methodStr), newTime = timeTaken;
+ if (curTime != null && metaCallTimeMap.replace(methodStr, curTime, newTime + curTime)) break;
+ if (curTime == null && (null == metaCallTimeMap.putIfAbsent(methodStr, newTime))) break;
}
- metaCallTimeMap.put(methodStr, timeTaken);
}
/**
http://git-wip-us.apache.org/repos/asf/hive/blob/f68b5dbb/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 6862f70..f4a9772 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -48,6 +48,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.ConcurrentHashMap;
import com.google.common.collect.ImmutableMap;
@@ -162,7 +163,7 @@ public class Hive {
private UserGroupInformation owner;
// metastore calls timing information
- private final Map<String, Long> metaCallTimeMap = new HashMap<String, Long>();
+ private final ConcurrentHashMap<String, Long> metaCallTimeMap = new ConcurrentHashMap<>();
private static ThreadLocal<Hive> hiveDB = new ThreadLocal<Hive>() {
@Override
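The addMethodTime rewrite above replaces an unsynchronized get-then-put with a lock-free retry loop over ConcurrentHashMap's atomic primitives: replace() bumps an existing total only if no concurrent update slipped in, putIfAbsent() claims the first write for a key, and the loop retries whichever branch lost a race. A minimal self-contained sketch of the same pattern follows; the CallTimes class and its names are hypothetical, not the Hive code itself.

import java.util.concurrent.ConcurrentHashMap;

public class CallTimes {
  private final ConcurrentHashMap<String, Long> times = new ConcurrentHashMap<>();

  // Adds timeTaken to the running total for key without locking.
  public void add(String key, long timeTaken) {
    while (true) {
      Long cur = times.get(key);
      if (cur == null) {
        // First writer wins only if the key is still absent.
        if (times.putIfAbsent(key, timeTaken) == null) break;
      } else if (times.replace(key, cur, cur + timeTaken)) {
        // Compare-and-swap succeeded against the value we read.
        break;
      }
      // Lost a race in either branch: reread and retry.
    }
  }

  public static void main(String[] args) {
    CallTimes t = new CallTimes();
    t.add("get_table", 12);
    t.add("get_table", 30);
    System.out.println(t.times.get("get_table")); // prints 42
  }
}

On Java 8 the whole loop can be written as times.merge(key, timeTaken, Long::sum), which performs the same atomic accumulation in one call.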
[04/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
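Every RPC added in the generated diff below follows one template. The synchronous client wraps the call as a send_X/recv_X pair, and the method's result struct carries one nullable field per exception declared in the IDL (hence the o1/o3 names, which come from the exception slot numbers); the receive side re-throws whichever field the server populated. A self-contained sketch of that result-to-exception mapping, using placeholder re-declarations rather than the real org.apache.hadoop.hive.metastore.api classes:

// Simplified stand-ins for the generated exception and result types.
class NoSuchObjectException extends Exception {}
class MetaException extends Exception {}

class DropConstraintResult {
  NoSuchObjectException o1; // set by the server when the constraint is missing
  MetaException o3;         // set on any other metastore failure
}

public class ThriftResultDemo {
  // Mirrors recv_drop_constraint(): even a void RPC returns a result
  // struct so typed exceptions can cross the wire.
  static void unwrap(DropConstraintResult result)
      throws NoSuchObjectException, MetaException {
    if (result.o1 != null) throw result.o1;
    if (result.o3 != null) throw result.o3;
    // No field set: the call succeeded.
  }

  public static void main(String[] args) {
    try {
      unwrap(new DropConstraintResult());
      System.out.println("drop_constraint succeeded");
    } catch (Exception e) {
      System.out.println("failed: " + e);
    }
  }
}

The server-side ProcessFunction is the mirror image: it catches the declared exceptions thrown by the handler and stores them into the same o1/o3 fields before serializing the reply.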
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 051c1f2..2a81c4b 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -80,6 +80,8 @@ public class ThriftHiveMetastore {
public void create_table_with_constraints(Table tbl, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
@@ -376,6 +378,8 @@ public class ThriftHiveMetastore {
public void create_table_with_constraints(Table tbl, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1218,6 +1222,32 @@ public class ThriftHiveMetastore {
return;
}
+ public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+ {
+ send_drop_constraint(req);
+ recv_drop_constraint();
+ }
+
+ public void send_drop_constraint(DropConstraintRequest req) throws org.apache.thrift.TException
+ {
+ drop_constraint_args args = new drop_constraint_args();
+ args.setReq(req);
+ sendBase("drop_constraint", args);
+ }
+
+ public void recv_drop_constraint() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+ {
+ drop_constraint_result result = new drop_constraint_result();
+ receiveBase(result, "drop_constraint");
+ if (result.o1 != null) {
+ throw result.o1;
+ }
+ if (result.o3 != null) {
+ throw result.o3;
+ }
+ return;
+ }
+
public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
{
send_drop_table(dbname, name, deleteData);
@@ -5535,6 +5565,38 @@ public class ThriftHiveMetastore {
}
}
+ public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ drop_constraint_call method_call = new drop_constraint_call(req, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class drop_constraint_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private DropConstraintRequest req;
+ public drop_constraint_call(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.req = req;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_constraint", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ drop_constraint_args args = new drop_constraint_args();
+ args.setReq(req);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ (new Client(prot)).recv_drop_constraint();
+ }
+ }
+
public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
drop_table_call method_call = new drop_table_call(dbname, name, deleteData, resultHandler, this, ___protocolFactory, ___transport);
@@ -10078,6 +10140,7 @@ public class ThriftHiveMetastore {
processMap.put("create_table", new create_table());
processMap.put("create_table_with_environment_context", new create_table_with_environment_context());
processMap.put("create_table_with_constraints", new create_table_with_constraints());
+ processMap.put("drop_constraint", new drop_constraint());
processMap.put("drop_table", new drop_table());
processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
processMap.put("get_tables", new get_tables());
@@ -10720,6 +10783,32 @@ public class ThriftHiveMetastore {
}
}
+ public static class drop_constraint<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_constraint_args> {
+ public drop_constraint() {
+ super("drop_constraint");
+ }
+
+ public drop_constraint_args getEmptyArgsInstance() {
+ return new drop_constraint_args();
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public drop_constraint_result getResult(I iface, drop_constraint_args args) throws org.apache.thrift.TException {
+ drop_constraint_result result = new drop_constraint_result();
+ try {
+ iface.drop_constraint(args.req);
+ } catch (NoSuchObjectException o1) {
+ result.o1 = o1;
+ } catch (MetaException o3) {
+ result.o3 = o3;
+ }
+ return result;
+ }
+ }
+
public static class drop_table<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_table_args> {
public drop_table() {
super("drop_table");
@@ -13964,6 +14053,7 @@ public class ThriftHiveMetastore {
processMap.put("create_table", new create_table());
processMap.put("create_table_with_environment_context", new create_table_with_environment_context());
processMap.put("create_table_with_constraints", new create_table_with_constraints());
+ processMap.put("drop_constraint", new drop_constraint());
processMap.put("drop_table", new drop_table());
processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
processMap.put("get_tables", new get_tables());
@@ -15307,20 +15397,20 @@ public class ThriftHiveMetastore {
}
}
- public static class drop_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_args, Void> {
- public drop_table() {
- super("drop_table");
+ public static class drop_constraint<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_constraint_args, Void> {
+ public drop_constraint() {
+ super("drop_constraint");
}
- public drop_table_args getEmptyArgsInstance() {
- return new drop_table_args();
+ public drop_constraint_args getEmptyArgsInstance() {
+ return new drop_constraint_args();
}
public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Void>() {
public void onComplete(Void o) {
- drop_table_result result = new drop_table_result();
+ drop_constraint_result result = new drop_constraint_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -15332,7 +15422,7 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- drop_table_result result = new drop_table_result();
+ drop_constraint_result result = new drop_constraint_result();
if (e instanceof NoSuchObjectException) {
result.o1 = (NoSuchObjectException) e;
result.setO1IsSet(true);
@@ -15363,25 +15453,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, drop_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.drop_table(args.dbname, args.name, args.deleteData,resultHandler);
+ public void start(I iface, drop_constraint_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.drop_constraint(args.req,resultHandler);
}
}
- public static class drop_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_with_environment_context_args, Void> {
- public drop_table_with_environment_context() {
- super("drop_table_with_environment_context");
+ public static class drop_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_args, Void> {
+ public drop_table() {
+ super("drop_table");
}
- public drop_table_with_environment_context_args getEmptyArgsInstance() {
- return new drop_table_with_environment_context_args();
+ public drop_table_args getEmptyArgsInstance() {
+ return new drop_table_args();
}
public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Void>() {
public void onComplete(Void o) {
- drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
+ drop_table_result result = new drop_table_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -15393,7 +15483,7 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
+ drop_table_result result = new drop_table_result();
if (e instanceof NoSuchObjectException) {
result.o1 = (NoSuchObjectException) e;
result.setO1IsSet(true);
@@ -15424,259 +15514,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, drop_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context,resultHandler);
- }
- }
-
- public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
- public get_tables() {
- super("get_tables");
- }
-
- public get_tables_args getEmptyArgsInstance() {
- return new get_tables_args();
- }
-
- public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<List<String>>() {
- public void onComplete(List<String> o) {
- get_tables_result result = new get_tables_result();
- result.success = o;
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- get_tables_result result = new get_tables_result();
- if (e instanceof MetaException) {
- result.o1 = (MetaException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, get_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
- iface.get_tables(args.db_name, args.pattern,resultHandler);
- }
- }
-
- public static class get_table_meta<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_meta_args, List<TableMeta>> {
- public get_table_meta() {
- super("get_table_meta");
- }
-
- public get_table_meta_args getEmptyArgsInstance() {
- return new get_table_meta_args();
- }
-
- public AsyncMethodCallback<List<TableMeta>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<List<TableMeta>>() {
- public void onComplete(List<TableMeta> o) {
- get_table_meta_result result = new get_table_meta_result();
- result.success = o;
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- get_table_meta_result result = new get_table_meta_result();
- if (e instanceof MetaException) {
- result.o1 = (MetaException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, get_table_meta_args args, org.apache.thrift.async.AsyncMethodCallback<List<TableMeta>> resultHandler) throws TException {
- iface.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types,resultHandler);
- }
- }
-
- public static class get_all_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {
- public get_all_tables() {
- super("get_all_tables");
- }
-
- public get_all_tables_args getEmptyArgsInstance() {
- return new get_all_tables_args();
- }
-
- public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<List<String>>() {
- public void onComplete(List<String> o) {
- get_all_tables_result result = new get_all_tables_result();
- result.success = o;
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- get_all_tables_result result = new get_all_tables_result();
- if (e instanceof MetaException) {
- result.o1 = (MetaException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, get_all_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
- iface.get_all_tables(args.db_name,resultHandler);
- }
- }
-
- public static class get_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_args, Table> {
- public get_table() {
- super("get_table");
- }
-
- public get_table_args getEmptyArgsInstance() {
- return new get_table_args();
- }
-
- public AsyncMethodCallback<Table> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
- final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Table>() {
- public void onComplete(Table o) {
- get_table_result result = new get_table_result();
- result.success = o;
- try {
- fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
- return;
- } catch (Exception e) {
- LOGGER.error("Exception writing to internal frame buffer", e);
- }
- fb.close();
- }
- public void onError(Exception e) {
- byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
- org.apache.thrift.TBase msg;
- get_table_result result = new get_table_result();
- if (e instanceof MetaException) {
- result.o1 = (MetaException) e;
- result.setO1IsSet(true);
- msg = result;
- }
- else if (e instanceof NoSuchObjectException) {
- result.o2 = (NoSuchObjectException) e;
- result.setO2IsSet(true);
- msg = result;
- }
- else
- {
- msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
- msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
- }
- try {
- fcall.sendResponse(fb,msg,msgType,seqid);
- return;
- } catch (Exception ex) {
- LOGGER.error("Exception writing to internal frame buffer", ex);
- }
- fb.close();
- }
- };
- }
-
- protected boolean isOneway() {
- return false;
- }
-
- public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback<Table> resultHandler) throws TException {
- iface.get_table(args.dbname, args.tbl_name,resultHandler);
+ public void start(I iface, drop_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.drop_table(args.dbname, args.name, args.deleteData,resultHandler);
}
}
- public static class get_table_objects_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_objects_by_name_args, List<Table>> {
- public get_table_objects_by_name() {
- super("get_table_objects_by_name");
+ public static class drop_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_with_environment_context_args, Void> {
+ public drop_table_with_environment_context() {
+ super("drop_table_with_environment_context");
}
- public get_table_objects_by_name_args getEmptyArgsInstance() {
- return new get_table_objects_by_name_args();
+ public drop_table_with_environment_context_args getEmptyArgsInstance() {
+ return new drop_table_with_environment_context_args();
}
- public AsyncMethodCallback<List<Table>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<List<Table>>() {
- public void onComplete(List<Table> o) {
- get_table_objects_by_name_result result = new get_table_objects_by_name_result();
- result.success = o;
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -15688,19 +15544,14 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- get_table_objects_by_name_result result = new get_table_objects_by_name_result();
- if (e instanceof MetaException) {
- result.o1 = (MetaException) e;
+ drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
+ if (e instanceof NoSuchObjectException) {
+ result.o1 = (NoSuchObjectException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof InvalidOperationException) {
- result.o2 = (InvalidOperationException) e;
- result.setO2IsSet(true);
- msg = result;
- }
- else if (e instanceof UnknownDBException) {
- result.o3 = (UnknownDBException) e;
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
result.setO3IsSet(true);
msg = result;
}
@@ -15724,25 +15575,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, get_table_objects_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<List<Table>> resultHandler) throws TException {
- iface.get_table_objects_by_name(args.dbname, args.tbl_names,resultHandler);
+ public void start(I iface, drop_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context,resultHandler);
}
}
- public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
- public get_table_names_by_filter() {
- super("get_table_names_by_filter");
+ public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
+ public get_tables() {
+ super("get_tables");
}
- public get_table_names_by_filter_args getEmptyArgsInstance() {
- return new get_table_names_by_filter_args();
+ public get_tables_args getEmptyArgsInstance() {
+ return new get_tables_args();
}
public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<List<String>>() {
public void onComplete(List<String> o) {
- get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+ get_tables_result result = new get_tables_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -15755,22 +15606,12 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+ get_tables_result result = new get_tables_result();
if (e instanceof MetaException) {
result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof InvalidOperationException) {
- result.o2 = (InvalidOperationException) e;
- result.setO2IsSet(true);
- msg = result;
- }
- else if (e instanceof UnknownDBException) {
- result.o3 = (UnknownDBException) e;
- result.setO3IsSet(true);
- msg = result;
- }
else
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -15791,25 +15632,26 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
- iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
+ public void start(I iface, get_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+ iface.get_tables(args.db_name, args.pattern,resultHandler);
}
}
- public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
- public alter_table() {
- super("alter_table");
+ public static class get_table_meta<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_meta_args, List<TableMeta>> {
+ public get_table_meta() {
+ super("get_table_meta");
}
- public alter_table_args getEmptyArgsInstance() {
- return new alter_table_args();
+ public get_table_meta_args getEmptyArgsInstance() {
+ return new get_table_meta_args();
}
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<List<TableMeta>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
- alter_table_result result = new alter_table_result();
+ return new AsyncMethodCallback<List<TableMeta>>() {
+ public void onComplete(List<TableMeta> o) {
+ get_table_meta_result result = new get_table_meta_result();
+ result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -15821,17 +15663,12 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- alter_table_result result = new alter_table_result();
- if (e instanceof InvalidOperationException) {
- result.o1 = (InvalidOperationException) e;
+ get_table_meta_result result = new get_table_meta_result();
+ if (e instanceof MetaException) {
+ result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -15852,25 +15689,26 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
+ public void start(I iface, get_table_meta_args args, org.apache.thrift.async.AsyncMethodCallback<List<TableMeta>> resultHandler) throws TException {
+ iface.get_table_meta(args.db_patterns, args.tbl_patterns, args.tbl_types,resultHandler);
}
}
- public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
- public alter_table_with_environment_context() {
- super("alter_table_with_environment_context");
+ public static class get_all_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {
+ public get_all_tables() {
+ super("get_all_tables");
}
- public alter_table_with_environment_context_args getEmptyArgsInstance() {
- return new alter_table_with_environment_context_args();
+ public get_all_tables_args getEmptyArgsInstance() {
+ return new get_all_tables_args();
}
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
- alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
+ return new AsyncMethodCallback<List<String>>() {
+ public void onComplete(List<String> o) {
+ get_all_tables_result result = new get_all_tables_result();
+ result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -15882,17 +15720,12 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
- if (e instanceof InvalidOperationException) {
- result.o1 = (InvalidOperationException) e;
+ get_all_tables_result result = new get_all_tables_result();
+ if (e instanceof MetaException) {
+ result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else
{
msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -15913,25 +15746,26 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
+ public void start(I iface, get_all_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+ iface.get_all_tables(args.db_name,resultHandler);
}
}
- public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
- public alter_table_with_cascade() {
- super("alter_table_with_cascade");
+ public static class get_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_args, Table> {
+ public get_table() {
+ super("get_table");
}
- public alter_table_with_cascade_args getEmptyArgsInstance() {
- return new alter_table_with_cascade_args();
+ public get_table_args getEmptyArgsInstance() {
+ return new get_table_args();
}
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Table> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
- alter_table_with_cascade_result result = new alter_table_with_cascade_result();
+ return new AsyncMethodCallback<Table>() {
+ public void onComplete(Table o) {
+ get_table_result result = new get_table_result();
+ result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -15943,14 +15777,14 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- alter_table_with_cascade_result result = new alter_table_with_cascade_result();
- if (e instanceof InvalidOperationException) {
- result.o1 = (InvalidOperationException) e;
+ get_table_result result = new get_table_result();
+ if (e instanceof MetaException) {
+ result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o2 = (MetaException) e;
+ else if (e instanceof NoSuchObjectException) {
+ result.o2 = (NoSuchObjectException) e;
result.setO2IsSet(true);
msg = result;
}
@@ -15974,25 +15808,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
+ public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback<Table> resultHandler) throws TException {
+ iface.get_table(args.dbname, args.tbl_name,resultHandler);
}
}
- public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
- public add_partition() {
- super("add_partition");
+ public static class get_table_objects_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_objects_by_name_args, List<Table>> {
+ public get_table_objects_by_name() {
+ super("get_table_objects_by_name");
}
- public add_partition_args getEmptyArgsInstance() {
- return new add_partition_args();
+ public get_table_objects_by_name_args getEmptyArgsInstance() {
+ return new get_table_objects_by_name_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<List<Table>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- add_partition_result result = new add_partition_result();
+ return new AsyncMethodCallback<List<Table>>() {
+ public void onComplete(List<Table> o) {
+ get_table_objects_by_name_result result = new get_table_objects_by_name_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16005,19 +15839,19 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partition_result result = new add_partition_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ get_table_objects_by_name_result result = new get_table_objects_by_name_result();
+ if (e instanceof MetaException) {
+ result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
+ else if (e instanceof InvalidOperationException) {
+ result.o2 = (InvalidOperationException) e;
result.setO2IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
+ else if (e instanceof UnknownDBException) {
+ result.o3 = (UnknownDBException) e;
result.setO3IsSet(true);
msg = result;
}
@@ -16041,25 +15875,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.add_partition(args.new_part,resultHandler);
+ public void start(I iface, get_table_objects_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<List<Table>> resultHandler) throws TException {
+ iface.get_table_objects_by_name(args.dbname, args.tbl_names,resultHandler);
}
}
- public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
- public add_partition_with_environment_context() {
- super("add_partition_with_environment_context");
+ public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
+ public get_table_names_by_filter() {
+ super("get_table_names_by_filter");
}
- public add_partition_with_environment_context_args getEmptyArgsInstance() {
- return new add_partition_with_environment_context_args();
+ public get_table_names_by_filter_args getEmptyArgsInstance() {
+ return new get_table_names_by_filter_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
+ return new AsyncMethodCallback<List<String>>() {
+ public void onComplete(List<String> o) {
+ get_table_names_by_filter_result result = new get_table_names_by_filter_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16072,19 +15906,19 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+ if (e instanceof MetaException) {
+ result.o1 = (MetaException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
+ else if (e instanceof InvalidOperationException) {
+ result.o2 = (InvalidOperationException) e;
result.setO2IsSet(true);
msg = result;
}
- else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
+ else if (e instanceof UnknownDBException) {
+ result.o3 = (UnknownDBException) e;
result.setO3IsSet(true);
msg = result;
}
@@ -16108,27 +15942,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
+ public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+ iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
}
}
- public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
- public add_partitions() {
- super("add_partitions");
+ public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
+ public alter_table() {
+ super("alter_table");
}
- public add_partitions_args getEmptyArgsInstance() {
- return new add_partitions_args();
+ public alter_table_args getEmptyArgsInstance() {
+ return new alter_table_args();
}
- public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Integer>() {
- public void onComplete(Integer o) {
- add_partitions_result result = new add_partitions_result();
- result.success = o;
- result.setSuccessIsSet(true);
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ alter_table_result result = new alter_table_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -16140,20 +15972,15 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partitions_result result = new add_partitions_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ alter_table_result result = new alter_table_result();
+ if (e instanceof InvalidOperationException) {
+ result.o1 = (InvalidOperationException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
- result.setO3IsSet(true);
+ result.o2 = (MetaException) e;
+ result.setO2IsSet(true);
msg = result;
}
else
@@ -16176,27 +16003,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
- iface.add_partitions(args.new_parts,resultHandler);
+ public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
}
}
- public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
- public add_partitions_pspec() {
- super("add_partitions_pspec");
+ public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
+ public alter_table_with_environment_context() {
+ super("alter_table_with_environment_context");
}
- public add_partitions_pspec_args getEmptyArgsInstance() {
- return new add_partitions_pspec_args();
+ public alter_table_with_environment_context_args getEmptyArgsInstance() {
+ return new alter_table_with_environment_context_args();
}
- public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Integer>() {
- public void onComplete(Integer o) {
- add_partitions_pspec_result result = new add_partitions_pspec_result();
- result.success = o;
- result.setSuccessIsSet(true);
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -16208,20 +16033,15 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partitions_pspec_result result = new add_partitions_pspec_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
+ if (e instanceof InvalidOperationException) {
+ result.o1 = (InvalidOperationException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
- result.setO3IsSet(true);
+ result.o2 = (MetaException) e;
+ result.setO2IsSet(true);
msg = result;
}
else
@@ -16244,26 +16064,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
- iface.add_partitions_pspec(args.new_parts,resultHandler);
+ public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
}
}
- public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
- public append_partition() {
- super("append_partition");
+ public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
+ public alter_table_with_cascade() {
+ super("alter_table_with_cascade");
}
- public append_partition_args getEmptyArgsInstance() {
- return new append_partition_args();
+ public alter_table_with_cascade_args getEmptyArgsInstance() {
+ return new alter_table_with_cascade_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- append_partition_result result = new append_partition_result();
- result.success = o;
+ return new AsyncMethodCallback<Void>() {
+ public void onComplete(Void o) {
+ alter_table_with_cascade_result result = new alter_table_with_cascade_result();
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -16275,20 +16094,15 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_result result = new append_partition_result();
- if (e instanceof InvalidObjectException) {
- result.o1 = (InvalidObjectException) e;
+ alter_table_with_cascade_result result = new alter_table_with_cascade_result();
+ if (e instanceof InvalidOperationException) {
+ result.o1 = (InvalidOperationException) e;
result.setO1IsSet(true);
msg = result;
}
- else if (e instanceof AlreadyExistsException) {
- result.o2 = (AlreadyExistsException) e;
- result.setO2IsSet(true);
- msg = result;
- }
else if (e instanceof MetaException) {
- result.o3 = (MetaException) e;
- result.setO3IsSet(true);
+ result.o2 = (MetaException) e;
+ result.setO2IsSet(true);
msg = result;
}
else
@@ -16311,25 +16125,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
+ public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+ iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
}
}
- public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
- public add_partitions_req() {
- super("add_partitions_req");
+ public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
+ public add_partition() {
+ super("add_partition");
}
- public add_partitions_req_args getEmptyArgsInstance() {
- return new add_partitions_req_args();
+ public add_partition_args getEmptyArgsInstance() {
+ return new add_partition_args();
}
- public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<AddPartitionsResult>() {
- public void onComplete(AddPartitionsResult o) {
- add_partitions_req_result result = new add_partitions_req_result();
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ add_partition_result result = new add_partition_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16342,7 +16156,7 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- add_partitions_req_result result = new add_partitions_req_result();
+ add_partition_result result = new add_partition_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -16378,25 +16192,25 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
- iface.add_partitions_req(args.request,resultHandler);
+ public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.add_partition(args.new_part,resultHandler);
}
}
- public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
- public append_partition_with_environment_context() {
- super("append_partition_with_environment_context");
+ public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
+ public add_partition_with_environment_context() {
+ super("add_partition_with_environment_context");
}
- public append_partition_with_environment_context_args getEmptyArgsInstance() {
- return new append_partition_with_environment_context_args();
+ public add_partition_with_environment_context_args getEmptyArgsInstance() {
+ return new add_partition_with_environment_context_args();
}
public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
return new AsyncMethodCallback<Partition>() {
public void onComplete(Partition o) {
- append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -16409,7 +16223,7 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -16445,26 +16259,27 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
+ public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
}
}
- public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
- public append_partition_by_name() {
- super("append_partition_by_name");
+ public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
+ public add_partitions() {
+ super("add_partitions");
}
- public append_partition_by_name_args getEmptyArgsInstance() {
- return new append_partition_by_name_args();
+ public add_partitions_args getEmptyArgsInstance() {
+ return new add_partitions_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- append_partition_by_name_result result = new append_partition_by_name_result();
+ return new AsyncMethodCallback<Integer>() {
+ public void onComplete(Integer o) {
+ add_partitions_result result = new add_partitions_result();
result.success = o;
+ result.setSuccessIsSet(true);
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -16476,7 +16291,7 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_by_name_result result = new append_partition_by_name_result();
+ add_partitions_result result = new add_partitions_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -16512,26 +16327,27 @@ public class ThriftHiveMetastore {
return false;
}
- public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
- iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+ public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+ iface.add_partitions(args.new_parts,resultHandler);
}
}
- public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
- public append_partition_by_name_with_environment_context() {
- super("append_partition_by_name_with_environment_context");
+ public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
+ public add_partitions_pspec() {
+ super("add_partitions_pspec");
}
- public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
- return new append_partition_by_name_with_environment_context_args();
+ public add_partitions_pspec_args getEmptyArgsInstance() {
+ return new add_partitions_pspec_args();
}
- public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Partition>() {
- public void onComplete(Partition o) {
- append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ return new AsyncMethodCallback<Integer>() {
+ public void onComplete(Integer o) {
+ add_partitions_pspec_result result = new add_partitions_pspec_result();
result.success = o;
+ result.setSuccessIsSet(true);
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -16543,7 +16359,342 @@ public class ThriftHiveMetastore {
public void onError(Exception e) {
byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
org.apache.thrift.TBase msg;
- append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ add_partitions_pspec_result result = new add_partitions_pspec_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+ iface.add_partitions_pspec(args.new_parts,resultHandler);
+ }
+ }
+
+ public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
+ public append_partition() {
+ super("append_partition");
+ }
+
+ public append_partition_args getEmptyArgsInstance() {
+ return new append_partition_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_result result = new append_partition_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_result result = new append_partition_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
+ }
+ }
+
+ public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
+ public add_partitions_req() {
+ super("add_partitions_req");
+ }
+
+ public add_partitions_req_args getEmptyArgsInstance() {
+ return new add_partitions_req_args();
+ }
+
+ public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<AddPartitionsResult>() {
+ public void onComplete(AddPartitionsResult o) {
+ add_partitions_req_result result = new add_partitions_req_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ add_partitions_req_result result = new add_partitions_req_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
+ iface.add_partitions_req(args.request,resultHandler);
+ }
+ }
+
+ public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
+ public append_partition_with_environment_context() {
+ super("append_partition_with_environment_context");
+ }
+
+ public append_partition_with_environment_context_args getEmptyArgsInstance() {
+ return new append_partition_with_environment_context_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
+ }
+ }
+
+ public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
+ public append_partition_by_name() {
+ super("append_partition_by_name");
+ }
+
+ public append_partition_by_name_args getEmptyArgsInstance() {
+ return new append_partition_by_name_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_by_name_result result = new append_partition_by_name_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_by_name_result result = new append_partition_by_name_result();
+ if (e instanceof InvalidObjectException) {
+ result.o1 = (InvalidObjectException) e;
+ result.setO1IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof AlreadyExistsException) {
+ result.o2 = (AlreadyExistsException) e;
+ result.setO2IsSet(true);
+ msg = result;
+ }
+ else if (e instanceof MetaException) {
+ result.o3 = (MetaException) e;
+ result.setO3IsSet(true);
+ msg = result;
+ }
+ else
+ {
+ msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+ msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+ }
+ try {
+ fcall.sendResponse(fb,msg,msgType,seqid);
+ return;
+ } catch (Exception ex) {
+ LOGGER.error("Exception writing to internal frame buffer", ex);
+ }
+ fb.close();
+ }
+ };
+ }
+
+ protected boolean isOneway() {
+ return false;
+ }
+
+ public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+ iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+ }
+ }
+
+ public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
+ public append_partition_by_name_with_environment_context() {
+ super("append_partition_by_name_with_environment_context");
+ }
+
+ public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
+ return new append_partition_by_name_with_environment_context_args();
+ }
+
+ public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ final org.apache.thrift.AsyncProcessFunction fcall = this;
+ return new AsyncMethodCallback<Partition>() {
+ public void onComplete(Partition o) {
+ append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+ result.success = o;
+ try {
+ fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+ return;
+ } catch (Exception e) {
+ LOGGER.error("Exception writing to internal frame buffer", e);
+ }
+ fb.close();
+ }
+ public void onError(Exception e) {
+ byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+ org.apache.thrift.TBase msg;
+ append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
if (e instanceof InvalidObjectException) {
result.o1 = (InvalidObjectException) e;
result.setO1IsSet(true);
@@ -42513,6 +42664,835 @@ public class ThriftHiveMetastore {
}
+ public static class drop_constraint_args implements org.apache.thrift.TBase<drop_constraint_args, drop_constraint_args._Fields>, java.io.Serializable, Cloneable, Comparable<drop_constraint_args> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_constraint_args");
+
+ private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new drop_constraint_argsStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new drop_constraint_argsTupleSchemeFactory());
+ }
+
+ private DropConstraintRequest req; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ REQ((short)1, "req");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // REQ
+ return REQ;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DropConstraintRequest.class)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_constraint_args.class, metaDataMap);
+ }
+
+ public drop_constraint_args() {
+ }
+
+ public drop_constraint_args(
+ DropConstraintRequest req)
+ {
+ this();
+ this.req = req;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public drop_constraint_args(drop_constraint_args other) {
+ if (other.isSetReq()) {
+ this.req = new DropConstraintRequest(other.req);
+ }
+ }
+
+ public drop_constraint_args deepCopy() {
+ return new drop_constraint_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.req = null;
+ }
+
+ public DropConstraintRequest getReq() {
+ return this.req;
+ }
+
+ public void setReq(DropConstraintRequest req) {
+ this.req = req;
+ }
+
+ public void unsetReq() {
+ this.req = null;
+ }
+
+ /** Returns true if field req is set (has been assigned a value) and false otherwise */
+ public boolean isSetReq() {
+ return this.req != null;
+ }
+
+ public void setReqIsSet(boolean value) {
+ if (!value) {
+ this.req = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case REQ:
+ if (value == null) {
+ unsetReq();
+ } else {
+ setReq((DropConstraintRequest)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case REQ:
+ return getReq();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case REQ:
+ return isSetReq();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof drop_constraint_args)
+ return this.equals((drop_constraint_args)that);
+ return false;
+ }
+
+ public boolean equals(drop_constraint_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_req = true && this.isSetReq();
+ boolean that_present_req = true && that.isSetReq();
+ if (this_present_req || that_present_req) {
+ if (!(this_present_req && that_present_req))
+ return false;
+ if (!this.req.equals(that.req))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_req = true && (isSetReq());
+ list.add(present_req);
+ if (present_req)
+ list.add(req);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(drop_constraint_args other) {
+ if (!getCl
<TRUNCATED>
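Every async handler in the generated block above follows the same four-method shape; only the args/result structs, the exceptions mapped in onError, and the start() delegation differ. A toy re-creation of that shape, with the libthrift machinery replaced by minimal stand-ins (nothing below is real Thrift API; the method names are copied from the generated code, everything else, including the "echo" method, is illustrative):

// Stand-in for org.apache.thrift.async.AsyncMethodCallback.
interface AsyncCallback<T> {
  void onComplete(T o);
  void onError(Exception e);
}

// Stand-in for org.apache.thrift.AsyncProcessFunction<I, ARGS, RESULT>.
abstract class AsyncProcessFunctionSketch<I, A, R> {
  final String methodName;
  AsyncProcessFunctionSketch(String name) { this.methodName = name; }

  abstract A getEmptyArgsInstance();            // fresh args struct to deserialize into
  abstract AsyncCallback<R> getResultHandler(Object frameBuffer, int seqid);
  abstract boolean isOneway();                  // false => a REPLY frame is written back
  abstract void start(I iface, A args, AsyncCallback<R> handler) throws Exception;
}

// One concrete handler, mirroring e.g. add_partitions above.
class EchoArgs { String payload; }

class echo extends AsyncProcessFunctionSketch<Object, EchoArgs, String> {
  echo() { super("echo"); }

  EchoArgs getEmptyArgsInstance() { return new EchoArgs(); }

  AsyncCallback<String> getResultHandler(final Object fb, final int seqid) {
    return new AsyncCallback<String>() {
      public void onComplete(String o) {
        // The real code wraps o in the generated *_result struct and calls
        // fcall.sendResponse(fb, result, TMessageType.REPLY, seqid).
      }
      public void onError(Exception e) {
        // The real code maps the declared exceptions (o1, o2, ...) into the
        // result struct and falls back to a TApplicationException otherwise.
      }
    };
  }

  boolean isOneway() { return false; }

  void start(Object iface, EchoArgs args, AsyncCallback<String> handler) {
    handler.onComplete(args.payload);           // the real code delegates to iface.<method>(...)
  }
}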
[14/20] hive git commit: HIVE-12837 : Better memory
estimation/allocation for hybrid grace hash join during hash table loading
(Wei Zheng, reviewed by Vikram Dixit K)
Posted by jd...@apache.org.
HIVE-12837 : Better memory estimation/allocation for hybrid grace hash join during hash table loading (Wei Zheng, reviewed by Vikram Dixit K)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cbebb4d7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cbebb4d7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cbebb4d7
Branch: refs/heads/llap
Commit: cbebb4d78064a9098e4145a0f7532f08885c9b27
Parents: a88050b
Author: Wei Zheng <we...@apache.org>
Authored: Wed May 4 23:09:08 2016 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Wed May 4 23:09:08 2016 -0700
----------------------------------------------------------------------
.../persistence/HybridHashTableContainer.java | 60 +++++++++++++++-----
.../ql/exec/persistence/KeyValueContainer.java | 4 ++
2 files changed, 51 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/cbebb4d7/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index f5da5a4..5552dfb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -90,6 +90,7 @@ public class HybridHashTableContainer
private boolean lastPartitionInMem; // only one (last one) partition is left in memory
private final int memoryCheckFrequency; // how often (# of rows apart) to check if memory is full
private final HybridHashTableConf nwayConf; // configuration for n-way join
+ private int writeBufferSize; // write buffer size for BytesBytesMultiHashMap
/** The OI used to deserialize values. We never deserialize keys. */
private LazyBinaryStructObjectInspector internalValueOi;
@@ -294,7 +295,6 @@ public class HybridHashTableContainer
this.spillLocalDirs = spillLocalDirs;
this.nwayConf = nwayConf;
- int writeBufferSize;
int numPartitions;
if (nwayConf == null) { // binary join
numPartitions = calcNumPartitions(memoryThreshold, estimatedTableSize, minNumParts, minWbSize);
@@ -327,7 +327,9 @@ public class HybridHashTableContainer
writeBufferSize : Integer.highestOneBit(writeBufferSize);
// Cap WriteBufferSize to avoid large preallocations
- writeBufferSize = writeBufferSize < minWbSize ? minWbSize : Math.min(maxWbSize, writeBufferSize);
+ // We also want to limit the write buffer size: with the usual 16 partitions, an
+ // uncapped buffer makes the spilling prediction (isMemoryFull) too conservative and causes unnecessary spills
+ writeBufferSize = writeBufferSize < minWbSize ? minWbSize : Math.min(maxWbSize / numPartitions, writeBufferSize);
this.bloom1 = new BloomFilter(newKeyCount);
@@ -417,6 +419,11 @@ public class HybridHashTableContainer
for (HashPartition hp : hashPartitions) {
if (hp.hashMap != null) {
memUsed += hp.hashMap.memorySize();
+ } else {
+ // also include the still-in-memory sidefile, before it has been truly spilled
+ if (hp.sidefileKVContainer != null) {
+ memUsed += hp.sidefileKVContainer.numRowsInReadBuffer() * tableRowSize;
+ }
}
}
return memoryUsed = memUsed;
@@ -454,6 +461,8 @@ public class HybridHashTableContainer
private MapJoinKey internalPutRow(KeyValueHelper keyValueHelper,
Writable currentKey, Writable currentValue) throws SerDeException, IOException {
+ boolean putToSidefile = false; // by default we put row into partition in memory
+
// Next, put row into corresponding hash partition
int keyHash = keyValueHelper.getHashFromKey();
int partitionId = keyHash & (hashPartitions.length - 1);
@@ -461,15 +470,13 @@ public class HybridHashTableContainer
bloom1.addLong(keyHash);
- if (isOnDisk(partitionId) || isHashMapSpilledOnCreation(partitionId)) {
- KeyValueContainer kvContainer = hashPartition.getSidefileKVContainer();
- kvContainer.add((HiveKey) currentKey, (BytesWritable) currentValue);
- } else {
- hashPartition.hashMap.put(keyValueHelper, keyHash); // Pass along hashcode to avoid recalculation
- totalInMemRowCount++;
-
- if ((totalInMemRowCount & (this.memoryCheckFrequency - 1)) == 0 && // check periodically
- !lastPartitionInMem) { // If this is the only partition in memory, proceed without check
+ if (isOnDisk(partitionId) || isHashMapSpilledOnCreation(partitionId)) { // destination on disk
+ putToSidefile = true;
+ } else { // destination in memory
+ if (!lastPartitionInMem && // If this is the only partition in memory, proceed without check
+ (hashPartition.size() == 0 || // An empty destination partition means a write buffer
+ // is about to be allocated, so we need to check whether memory is full
+ (totalInMemRowCount & (this.memoryCheckFrequency - 1)) == 0)) { // check periodically
if (isMemoryFull()) {
if ((numPartitionsSpilled == hashPartitions.length - 1) ) {
LOG.warn("This LAST partition in memory won't be spilled!");
@@ -479,9 +486,16 @@ public class HybridHashTableContainer
int biggest = biggestPartition();
spillPartition(biggest);
this.setSpill(true);
+ if (partitionId == biggest) { // destination hash partition has just been spilled
+ putToSidefile = true;
+ }
} else { // n-way join
LOG.info("N-way spilling: spill tail partition from previously loaded small tables");
+ int biggest = nwayConf.getNextSpillPartition();
memoryThreshold += nwayConf.spill();
+ if (biggest != 0 && partitionId == biggest) { // destination hash partition has just been spilled
+ putToSidefile = true;
+ }
LOG.info("Memory threshold has been increased to: " + memoryThreshold);
}
numPartitionsSpilled++;
@@ -490,6 +504,15 @@ public class HybridHashTableContainer
}
}
+ // Now we know where to put the row
+ if (putToSidefile) {
+ KeyValueContainer kvContainer = hashPartition.getSidefileKVContainer();
+ kvContainer.add((HiveKey) currentKey, (BytesWritable) currentValue);
+ } else {
+ hashPartition.hashMap.put(keyValueHelper, keyHash); // Pass along hashcode to avoid recalculation
+ totalInMemRowCount++;
+ }
+
return null; // there's no key to return
}
@@ -513,11 +536,21 @@ public class HybridHashTableContainer
}
/**
- * Check if the memory threshold is reached
+ * Check if the memory threshold is about to be reached.
+ * Since the write buffers are lazily allocated in BytesBytesMultiHashMap, we need to
+ * account for them as well.
* @return true if memory is full, false if not
*/
private boolean isMemoryFull() {
- return refreshMemoryUsed() >= memoryThreshold;
+ int numPartitionsInMem = 0;
+
+ for (HashPartition hp : hashPartitions) {
+ if (!hp.isHashMapOnDisk()) {
+ numPartitionsInMem++;
+ }
+ }
+
+ return refreshMemoryUsed() + writeBufferSize * numPartitionsInMem >= memoryThreshold;
}
/**
@@ -561,6 +594,7 @@ public class HybridHashTableContainer
new com.esotericsoftware.kryo.io.Output(outputStream);
Kryo kryo = SerializationUtilities.borrowKryo();
try {
+ LOG.info("Trying to spill hash partition " + partitionId + " ...");
kryo.writeObject(output, partition.hashMap); // use Kryo to serialize hashmap
output.close();
outputStream.close();
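The write-buffer hunk above is the heart of the change: the buffer is now capped at maxWbSize / numPartitions so that, with the usual 16 partitions, lazily allocated buffers do not dominate the memory estimate. A runnable illustration of how that cap behaves; the sizes and the demo class are made up for illustration, not Hive defaults:

// Illustrative only: how the new per-partition cap behaves with hypothetical sizes.
public class WriteBufferCapDemo {
  static int capWriteBufferSize(int writeBufferSize, int minWbSize, int maxWbSize, int numPartitions) {
    // Same expression as the '+' line in the diff above: the per-partition cap keeps
    // 16 write buffers from dominating the memory estimate in isMemoryFull().
    return writeBufferSize < minWbSize ? minWbSize : Math.min(maxWbSize / numPartitions, writeBufferSize);
  }

  public static void main(String[] args) {
    int minWb = 512 * 1024, maxWb = 16 * 1024 * 1024, parts = 16;
    // Large request is capped to maxWb / parts = 1 MB instead of 8 MB:
    System.out.println(capWriteBufferSize(8 * 1024 * 1024, minWb, maxWb, parts)); // 1048576
    // Tiny request is floored at minWb:
    System.out.println(capWriteBufferSize(128 * 1024, minWb, maxWb, parts));      // 524288
  }
}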
http://git-wip-us.apache.org/repos/asf/hive/blob/cbebb4d7/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java
index e2b22d3..72faf8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/KeyValueContainer.java
@@ -215,6 +215,10 @@ public class KeyValueContainer {
return row;
}
+ public int numRowsInReadBuffer() {
+ return rowsInReadBuffer;
+ }
+
public int size() {
return rowsInReadBuffer + rowsOnDisk;
}
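Putting the hunks together, internalPutRow now decides the row's destination first and writes it once at the end, instead of interleaving the write with the spill check. A condensed, self-contained model of that control flow for the binary-join case (HashPartition and the sidefile are reduced to toy stand-ins; the n-way branch, Bloom-filter, and spill bookkeeping are omitted):

import java.util.*;

class RoutingSketch {
  static class Partition {
    boolean onDisk;                             // hashMap already spilled (or spilled on creation)?
    List<String> hashMap = new ArrayList<>();   // stands in for BytesBytesMultiHashMap
    List<String> sidefile = new ArrayList<>();  // stands in for KeyValueContainer
    int size() { return hashMap.size() + sidefile.size(); }
  }

  Partition[] partitions = new Partition[16];
  long totalInMemRowCount = 0;
  int memoryCheckFrequency = 1024;              // power of two, as in the real container
  boolean lastPartitionInMem = false;

  { for (int i = 0; i < partitions.length; i++) partitions[i] = new Partition(); }

  void putRow(String key, String value) {
    int partitionId = key.hashCode() & (partitions.length - 1);
    Partition p = partitions[partitionId];
    boolean putToSidefile = false;              // by default the row goes to the in-memory map

    if (p.onDisk) {                             // destination already spilled
      putToSidefile = true;
    } else if (!lastPartitionInMem
        && (p.size() == 0                       // first row => a write buffer would be allocated
            || (totalInMemRowCount & (memoryCheckFrequency - 1)) == 0)) {
      if (isMemoryFull()) {
        int victim = biggestPartition();        // spill the largest in-memory partition
        partitions[victim].onDisk = true;
        if (partitionId == victim) {
          putToSidefile = true;                 // our destination was just spilled: redirect
        }
      }
    }

    if (putToSidefile) {                        // now we know where the row belongs
      p.sidefile.add(key + "=" + value);
    } else {
      p.hashMap.add(key + "=" + value);
      totalInMemRowCount++;
    }
  }

  boolean isMemoryFull() { return false; }      // threshold check elided (see sketch above)
  int biggestPartition() { return 0; }          // selection heuristic elided
}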
[18/20] hive git commit: HIVE-13639: CBO rule to pull up constants
through Union (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
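In the plan diffs that follow, the effect of the rule is that the constant year each UNION branch produces (2001 on one side of the join, 2002 on the other) disappears from the shuffled Group By keys and is re-attached once by a Select operator above the union. A minimal, hypothetical query showing the shape of the rewrite (the tables and columns are illustrative, not the actual query75 text):

-- Both union branches project the same literal year:
SELECT 2001 AS yr, item_sk, amt FROM catalog_sales_src
UNION ALL
SELECT 2001 AS yr, item_sk, amt FROM store_sales_src;

-- Before HIVE-13639 the plan carries yr through every branch and shuffles on
-- (yr, item_sk). After the rule, the branches emit only (item_sk, amt) and a
-- Select above the Union adds yr = 2001 back, which is the
-- "Select Operator ... keys:2001" pattern visible in the diff below.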
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/perf/query75.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query75.q.out b/ql/src/test/results/clientpositive/perf/query75.q.out
index 15c46c2..731ff62 100644
--- a/ql/src/test/results/clientpositive/perf/query75.q.out
+++ b/ql/src/test/results/clientpositive/perf/query75.q.out
@@ -41,363 +41,367 @@ Stage-0
<-Reducer 7 [SIMPLE_EDGE]
SHUFFLE [RS_153]
Select Operator [SEL_152] (rows=169103 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
+ Output:["_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
Filter Operator [FIL_151] (rows=169103 width=1436)
predicate:(UDFToDouble((CAST( _col5 AS decimal(17,2)) / CAST( _col12 AS decimal(17,2)))) < 0.9)
Merge Join Operator [MERGEJOIN_259] (rows=507310 width=1436)
- Conds:RS_148._col1, _col2, _col3, _col4=RS_149._col1, _col2, _col3, _col4(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col12","_col13"]
+ Conds:RS_148._col1, _col2, _col3, _col4=RS_149._col1, _col2, _col3, _col4(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col12","_col13"]
<-Reducer 31 [SIMPLE_EDGE]
SHUFFLE [RS_149]
PartitionCols:_col1, _col2, _col3, _col4
- Group By Operator [GBY_146] (rows=461191 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
- <-Union 30 [SIMPLE_EDGE]
- <-Reducer 29 [CONTAINS]
- Reduce Output Operator [RS_145]
- PartitionCols:_col0, _col1, _col2, _col3, _col4
- Group By Operator [GBY_144] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:_col0, _col1, _col2, _col3, _col4
- Select Operator [SEL_142] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Select Operator [SEL_95] (rows=307461 width=1436)
+ Select Operator [SEL_147] (rows=461191 width=1436)
+ Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
+ Group By Operator [GBY_146] (rows=461191 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:2001, KEY._col1, KEY._col2, KEY._col3, KEY._col4
+ <-Union 30 [SIMPLE_EDGE]
+ <-Reducer 29 [CONTAINS]
+ Reduce Output Operator [RS_145]
+ PartitionCols:2001, _col1, _col2, _col3, _col4
+ Group By Operator [GBY_144] (rows=922383 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2001, _col1, _col2, _col3, _col4
+ Select Operator [SEL_142] (rows=922383 width=1436)
Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
- Merge Join Operator [MERGEJOIN_252] (rows=307461 width=1436)
- Conds:RS_92._col2, _col1=RS_93._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
- <-Map 34 [SIMPLE_EDGE]
- SHUFFLE [RS_93]
- PartitionCols:_col1, _col0
- Select Operator [SEL_85] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3"]
- Filter Operator [FIL_232] (rows=1 width=0)
- predicate:cr_item_sk is not null
- TableScan [TS_83] (rows=1 width=0)
- default@catalog_returns,catalog_returns,Tbl:PARTIAL,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
- <-Reducer 28 [SIMPLE_EDGE]
- SHUFFLE [RS_92]
- PartitionCols:_col2, _col1
- Merge Join Operator [MERGEJOIN_251] (rows=279510 width=1436)
- Conds:RS_89._col0=RS_90._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 33 [SIMPLE_EDGE]
- SHUFFLE [RS_90]
- PartitionCols:_col0
- Select Operator [SEL_82] (rows=36524 width=1119)
- Output:["_col0"]
- Filter Operator [FIL_231] (rows=36524 width=1119)
- predicate:((d_year = 2001) and d_date_sk is not null)
- TableScan [TS_80] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
- <-Reducer 27 [SIMPLE_EDGE]
- SHUFFLE [RS_89]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_250] (rows=254100 width=1436)
- Conds:RS_86._col1=RS_87._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 26 [SIMPLE_EDGE]
- SHUFFLE [RS_86]
- PartitionCols:_col1
- Select Operator [SEL_76] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4"]
- Filter Operator [FIL_229] (rows=1 width=0)
- predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
- TableScan [TS_74] (rows=1 width=0)
- default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
- <-Map 32 [SIMPLE_EDGE]
- SHUFFLE [RS_87]
- PartitionCols:_col0
- Select Operator [SEL_79] (rows=231000 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col5"]
- Filter Operator [FIL_230] (rows=231000 width=1436)
- predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
- TableScan [TS_77] (rows=462000 width=1436)
- default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
- <-Reducer 38 [CONTAINS]
- Reduce Output Operator [RS_145]
- PartitionCols:_col0, _col1, _col2, _col3, _col4
- Group By Operator [GBY_144] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:_col0, _col1, _col2, _col3, _col4
- Select Operator [SEL_142] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Select Operator [SEL_117] (rows=307461 width=1436)
+ Select Operator [SEL_95] (rows=307461 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Merge Join Operator [MERGEJOIN_252] (rows=307461 width=1436)
+ Conds:RS_92._col2, _col1=RS_93._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
+ <-Map 34 [SIMPLE_EDGE]
+ SHUFFLE [RS_93]
+ PartitionCols:_col1, _col0
+ Select Operator [SEL_85] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3"]
+ Filter Operator [FIL_232] (rows=1 width=0)
+ predicate:cr_item_sk is not null
+ TableScan [TS_83] (rows=1 width=0)
+ default@catalog_returns,catalog_returns,Tbl:PARTIAL,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
+ <-Reducer 28 [SIMPLE_EDGE]
+ SHUFFLE [RS_92]
+ PartitionCols:_col2, _col1
+ Merge Join Operator [MERGEJOIN_251] (rows=279510 width=1436)
+ Conds:RS_89._col0=RS_90._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 33 [SIMPLE_EDGE]
+ SHUFFLE [RS_90]
+ PartitionCols:_col0
+ Select Operator [SEL_82] (rows=36524 width=1119)
+ Output:["_col0"]
+ Filter Operator [FIL_231] (rows=36524 width=1119)
+ predicate:((d_year = 2001) and d_date_sk is not null)
+ TableScan [TS_80] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+ <-Reducer 27 [SIMPLE_EDGE]
+ SHUFFLE [RS_89]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_250] (rows=254100 width=1436)
+ Conds:RS_86._col1=RS_87._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 26 [SIMPLE_EDGE]
+ SHUFFLE [RS_86]
+ PartitionCols:_col1
+ Select Operator [SEL_76] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4"]
+ Filter Operator [FIL_229] (rows=1 width=0)
+ predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
+ TableScan [TS_74] (rows=1 width=0)
+ default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
+ <-Map 32 [SIMPLE_EDGE]
+ SHUFFLE [RS_87]
+ PartitionCols:_col0
+ Select Operator [SEL_79] (rows=231000 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col5"]
+ Filter Operator [FIL_230] (rows=231000 width=1436)
+ predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
+ TableScan [TS_77] (rows=462000 width=1436)
+ default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+ <-Reducer 38 [CONTAINS]
+ Reduce Output Operator [RS_145]
+ PartitionCols:2001, _col1, _col2, _col3, _col4
+ Group By Operator [GBY_144] (rows=922383 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2001, _col1, _col2, _col3, _col4
+ Select Operator [SEL_142] (rows=922383 width=1436)
Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
- Merge Join Operator [MERGEJOIN_255] (rows=307461 width=1436)
- Conds:RS_114._col2, _col1=RS_115._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
- <-Map 41 [SIMPLE_EDGE]
- SHUFFLE [RS_115]
- PartitionCols:_col1, _col0
- Select Operator [SEL_107] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3"]
- Filter Operator [FIL_236] (rows=1 width=0)
- predicate:sr_item_sk is not null
- TableScan [TS_105] (rows=1 width=0)
- default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
- <-Reducer 37 [SIMPLE_EDGE]
- SHUFFLE [RS_114]
- PartitionCols:_col2, _col1
- Merge Join Operator [MERGEJOIN_254] (rows=279510 width=1436)
- Conds:RS_111._col0=RS_112._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 40 [SIMPLE_EDGE]
- SHUFFLE [RS_112]
- PartitionCols:_col0
- Select Operator [SEL_104] (rows=36524 width=1119)
- Output:["_col0"]
- Filter Operator [FIL_235] (rows=36524 width=1119)
- predicate:((d_year = 2001) and d_date_sk is not null)
- TableScan [TS_102] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
- <-Reducer 36 [SIMPLE_EDGE]
- SHUFFLE [RS_111]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_253] (rows=254100 width=1436)
- Conds:RS_108._col1=RS_109._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 35 [SIMPLE_EDGE]
- SHUFFLE [RS_108]
- PartitionCols:_col1
- Select Operator [SEL_98] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4"]
- Filter Operator [FIL_233] (rows=1 width=0)
- predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
- TableScan [TS_96] (rows=1 width=0)
- default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
- <-Map 39 [SIMPLE_EDGE]
- SHUFFLE [RS_109]
- PartitionCols:_col0
- Select Operator [SEL_101] (rows=231000 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col5"]
- Filter Operator [FIL_234] (rows=231000 width=1436)
- predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
- TableScan [TS_99] (rows=462000 width=1436)
- default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
- <-Reducer 45 [CONTAINS]
- Reduce Output Operator [RS_145]
- PartitionCols:_col0, _col1, _col2, _col3, _col4
- Group By Operator [GBY_144] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:_col0, _col1, _col2, _col3, _col4
- Select Operator [SEL_142] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Select Operator [SEL_141] (rows=307461 width=1436)
+ Select Operator [SEL_117] (rows=307461 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Merge Join Operator [MERGEJOIN_255] (rows=307461 width=1436)
+ Conds:RS_114._col2, _col1=RS_115._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
+ <-Map 41 [SIMPLE_EDGE]
+ SHUFFLE [RS_115]
+ PartitionCols:_col1, _col0
+ Select Operator [SEL_107] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3"]
+ Filter Operator [FIL_236] (rows=1 width=0)
+ predicate:sr_item_sk is not null
+ TableScan [TS_105] (rows=1 width=0)
+ default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
+ <-Reducer 37 [SIMPLE_EDGE]
+ SHUFFLE [RS_114]
+ PartitionCols:_col2, _col1
+ Merge Join Operator [MERGEJOIN_254] (rows=279510 width=1436)
+ Conds:RS_111._col0=RS_112._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 40 [SIMPLE_EDGE]
+ SHUFFLE [RS_112]
+ PartitionCols:_col0
+ Select Operator [SEL_104] (rows=36524 width=1119)
+ Output:["_col0"]
+ Filter Operator [FIL_235] (rows=36524 width=1119)
+ predicate:((d_year = 2001) and d_date_sk is not null)
+ TableScan [TS_102] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+ <-Reducer 36 [SIMPLE_EDGE]
+ SHUFFLE [RS_111]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_253] (rows=254100 width=1436)
+ Conds:RS_108._col1=RS_109._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 35 [SIMPLE_EDGE]
+ SHUFFLE [RS_108]
+ PartitionCols:_col1
+ Select Operator [SEL_98] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4"]
+ Filter Operator [FIL_233] (rows=1 width=0)
+ predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
+ TableScan [TS_96] (rows=1 width=0)
+ default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
+ <-Map 39 [SIMPLE_EDGE]
+ SHUFFLE [RS_109]
+ PartitionCols:_col0
+ Select Operator [SEL_101] (rows=231000 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col5"]
+ Filter Operator [FIL_234] (rows=231000 width=1436)
+ predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
+ TableScan [TS_99] (rows=462000 width=1436)
+ default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+ <-Reducer 45 [CONTAINS]
+ Reduce Output Operator [RS_145]
+ PartitionCols:2001, _col1, _col2, _col3, _col4
+ Group By Operator [GBY_144] (rows=922383 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2001, _col1, _col2, _col3, _col4
+ Select Operator [SEL_142] (rows=922383 width=1436)
Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
- Merge Join Operator [MERGEJOIN_258] (rows=307461 width=1436)
- Conds:RS_138._col2, _col1=RS_139._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
- <-Map 48 [SIMPLE_EDGE]
- SHUFFLE [RS_139]
- PartitionCols:_col1, _col0
- Select Operator [SEL_131] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3"]
- Filter Operator [FIL_240] (rows=1 width=0)
- predicate:wr_item_sk is not null
- TableScan [TS_129] (rows=1 width=0)
- default@web_returns,web_returns,Tbl:PARTIAL,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
- <-Reducer 44 [SIMPLE_EDGE]
- SHUFFLE [RS_138]
- PartitionCols:_col2, _col1
- Merge Join Operator [MERGEJOIN_257] (rows=279510 width=1436)
- Conds:RS_135._col0=RS_136._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 47 [SIMPLE_EDGE]
- SHUFFLE [RS_136]
- PartitionCols:_col0
- Select Operator [SEL_128] (rows=36524 width=1119)
- Output:["_col0"]
- Filter Operator [FIL_239] (rows=36524 width=1119)
- predicate:((d_year = 2001) and d_date_sk is not null)
- TableScan [TS_126] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
- <-Reducer 43 [SIMPLE_EDGE]
- SHUFFLE [RS_135]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_256] (rows=254100 width=1436)
- Conds:RS_132._col1=RS_133._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 42 [SIMPLE_EDGE]
- SHUFFLE [RS_132]
- PartitionCols:_col1
- Select Operator [SEL_122] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4"]
- Filter Operator [FIL_237] (rows=1 width=0)
- predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
- TableScan [TS_120] (rows=1 width=0)
- default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
- <-Map 46 [SIMPLE_EDGE]
- SHUFFLE [RS_133]
- PartitionCols:_col0
- Select Operator [SEL_125] (rows=231000 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col5"]
- Filter Operator [FIL_238] (rows=231000 width=1436)
- predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
- TableScan [TS_123] (rows=462000 width=1436)
- default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+ Select Operator [SEL_141] (rows=307461 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Merge Join Operator [MERGEJOIN_258] (rows=307461 width=1436)
+ Conds:RS_138._col2, _col1=RS_139._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
+ <-Map 48 [SIMPLE_EDGE]
+ SHUFFLE [RS_139]
+ PartitionCols:_col1, _col0
+ Select Operator [SEL_131] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3"]
+ Filter Operator [FIL_240] (rows=1 width=0)
+ predicate:wr_item_sk is not null
+ TableScan [TS_129] (rows=1 width=0)
+ default@web_returns,web_returns,Tbl:PARTIAL,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
+ <-Reducer 44 [SIMPLE_EDGE]
+ SHUFFLE [RS_138]
+ PartitionCols:_col2, _col1
+ Merge Join Operator [MERGEJOIN_257] (rows=279510 width=1436)
+ Conds:RS_135._col0=RS_136._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 47 [SIMPLE_EDGE]
+ SHUFFLE [RS_136]
+ PartitionCols:_col0
+ Select Operator [SEL_128] (rows=36524 width=1119)
+ Output:["_col0"]
+ Filter Operator [FIL_239] (rows=36524 width=1119)
+ predicate:((d_year = 2001) and d_date_sk is not null)
+ TableScan [TS_126] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+ <-Reducer 43 [SIMPLE_EDGE]
+ SHUFFLE [RS_135]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_256] (rows=254100 width=1436)
+ Conds:RS_132._col1=RS_133._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 42 [SIMPLE_EDGE]
+ SHUFFLE [RS_132]
+ PartitionCols:_col1
+ Select Operator [SEL_122] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4"]
+ Filter Operator [FIL_237] (rows=1 width=0)
+ predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
+ TableScan [TS_120] (rows=1 width=0)
+ default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
+ <-Map 46 [SIMPLE_EDGE]
+ SHUFFLE [RS_133]
+ PartitionCols:_col0
+ Select Operator [SEL_125] (rows=231000 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col5"]
+ Filter Operator [FIL_238] (rows=231000 width=1436)
+ predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
+ TableScan [TS_123] (rows=462000 width=1436)
+ default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
<-Reducer 6 [SIMPLE_EDGE]
SHUFFLE [RS_148]
PartitionCols:_col1, _col2, _col3, _col4
- Group By Operator [GBY_72] (rows=461191 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
- <-Union 5 [SIMPLE_EDGE]
- <-Reducer 15 [CONTAINS]
- Reduce Output Operator [RS_71]
- PartitionCols:_col0, _col1, _col2, _col3, _col4
- Group By Operator [GBY_70] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:_col0, _col1, _col2, _col3, _col4
- Select Operator [SEL_68] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Select Operator [SEL_43] (rows=307461 width=1436)
+ Select Operator [SEL_73] (rows=461191 width=1436)
+ Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
+ Group By Operator [GBY_72] (rows=461191 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:2002, KEY._col1, KEY._col2, KEY._col3, KEY._col4
+ <-Union 5 [SIMPLE_EDGE]
+ <-Reducer 15 [CONTAINS]
+ Reduce Output Operator [RS_71]
+ PartitionCols:2002, _col1, _col2, _col3, _col4
+ Group By Operator [GBY_70] (rows=922383 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2002, _col1, _col2, _col3, _col4
+ Select Operator [SEL_68] (rows=922383 width=1436)
Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
- Merge Join Operator [MERGEJOIN_246] (rows=307461 width=1436)
- Conds:RS_40._col2, _col1=RS_41._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
- <-Map 18 [SIMPLE_EDGE]
- SHUFFLE [RS_41]
- PartitionCols:_col1, _col0
- Select Operator [SEL_33] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3"]
- Filter Operator [FIL_224] (rows=1 width=0)
- predicate:sr_item_sk is not null
- TableScan [TS_31] (rows=1 width=0)
- default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
- <-Reducer 14 [SIMPLE_EDGE]
- SHUFFLE [RS_40]
- PartitionCols:_col2, _col1
- Merge Join Operator [MERGEJOIN_245] (rows=279510 width=1436)
- Conds:RS_37._col0=RS_38._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 17 [SIMPLE_EDGE]
- SHUFFLE [RS_38]
- PartitionCols:_col0
- Select Operator [SEL_30] (rows=36524 width=1119)
- Output:["_col0"]
- Filter Operator [FIL_223] (rows=36524 width=1119)
- predicate:((d_year = 2002) and d_date_sk is not null)
- TableScan [TS_28] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
- <-Reducer 13 [SIMPLE_EDGE]
- SHUFFLE [RS_37]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_244] (rows=254100 width=1436)
- Conds:RS_34._col1=RS_35._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 12 [SIMPLE_EDGE]
- SHUFFLE [RS_34]
- PartitionCols:_col1
- Select Operator [SEL_24] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4"]
- Filter Operator [FIL_221] (rows=1 width=0)
- predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
- TableScan [TS_22] (rows=1 width=0)
- default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
- <-Map 16 [SIMPLE_EDGE]
- SHUFFLE [RS_35]
- PartitionCols:_col0
- Select Operator [SEL_27] (rows=231000 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col5"]
- Filter Operator [FIL_222] (rows=231000 width=1436)
- predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
- TableScan [TS_25] (rows=462000 width=1436)
- default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
- <-Reducer 22 [CONTAINS]
- Reduce Output Operator [RS_71]
- PartitionCols:_col0, _col1, _col2, _col3, _col4
- Group By Operator [GBY_70] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:_col0, _col1, _col2, _col3, _col4
- Select Operator [SEL_68] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Select Operator [SEL_67] (rows=307461 width=1436)
+ Select Operator [SEL_43] (rows=307461 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Merge Join Operator [MERGEJOIN_246] (rows=307461 width=1436)
+ Conds:RS_40._col2, _col1=RS_41._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
+ <-Map 18 [SIMPLE_EDGE]
+ SHUFFLE [RS_41]
+ PartitionCols:_col1, _col0
+ Select Operator [SEL_33] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3"]
+ Filter Operator [FIL_224] (rows=1 width=0)
+ predicate:sr_item_sk is not null
+ TableScan [TS_31] (rows=1 width=0)
+ default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
+ <-Reducer 14 [SIMPLE_EDGE]
+ SHUFFLE [RS_40]
+ PartitionCols:_col2, _col1
+ Merge Join Operator [MERGEJOIN_245] (rows=279510 width=1436)
+ Conds:RS_37._col0=RS_38._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 17 [SIMPLE_EDGE]
+ SHUFFLE [RS_38]
+ PartitionCols:_col0
+ Select Operator [SEL_30] (rows=36524 width=1119)
+ Output:["_col0"]
+ Filter Operator [FIL_223] (rows=36524 width=1119)
+ predicate:((d_year = 2002) and d_date_sk is not null)
+ TableScan [TS_28] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+ <-Reducer 13 [SIMPLE_EDGE]
+ SHUFFLE [RS_37]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_244] (rows=254100 width=1436)
+ Conds:RS_34._col1=RS_35._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 12 [SIMPLE_EDGE]
+ SHUFFLE [RS_34]
+ PartitionCols:_col1
+ Select Operator [SEL_24] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4"]
+ Filter Operator [FIL_221] (rows=1 width=0)
+ predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
+ TableScan [TS_22] (rows=1 width=0)
+ default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
+ <-Map 16 [SIMPLE_EDGE]
+ SHUFFLE [RS_35]
+ PartitionCols:_col0
+ Select Operator [SEL_27] (rows=231000 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col5"]
+ Filter Operator [FIL_222] (rows=231000 width=1436)
+ predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
+ TableScan [TS_25] (rows=462000 width=1436)
+ default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+ <-Reducer 22 [CONTAINS]
+ Reduce Output Operator [RS_71]
+ PartitionCols:2002, _col1, _col2, _col3, _col4
+ Group By Operator [GBY_70] (rows=922383 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2002, _col1, _col2, _col3, _col4
+ Select Operator [SEL_68] (rows=922383 width=1436)
Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
- Merge Join Operator [MERGEJOIN_249] (rows=307461 width=1436)
- Conds:RS_64._col2, _col1=RS_65._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
- <-Map 25 [SIMPLE_EDGE]
- SHUFFLE [RS_65]
- PartitionCols:_col1, _col0
- Select Operator [SEL_57] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3"]
- Filter Operator [FIL_228] (rows=1 width=0)
- predicate:wr_item_sk is not null
- TableScan [TS_55] (rows=1 width=0)
- default@web_returns,web_returns,Tbl:PARTIAL,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
- <-Reducer 21 [SIMPLE_EDGE]
- SHUFFLE [RS_64]
- PartitionCols:_col2, _col1
- Merge Join Operator [MERGEJOIN_248] (rows=279510 width=1436)
- Conds:RS_61._col0=RS_62._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 24 [SIMPLE_EDGE]
- SHUFFLE [RS_62]
- PartitionCols:_col0
- Select Operator [SEL_54] (rows=36524 width=1119)
- Output:["_col0"]
- Filter Operator [FIL_227] (rows=36524 width=1119)
- predicate:((d_year = 2002) and d_date_sk is not null)
- TableScan [TS_52] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
- <-Reducer 20 [SIMPLE_EDGE]
- SHUFFLE [RS_61]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_247] (rows=254100 width=1436)
- Conds:RS_58._col1=RS_59._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 19 [SIMPLE_EDGE]
- SHUFFLE [RS_58]
- PartitionCols:_col1
- Select Operator [SEL_48] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4"]
- Filter Operator [FIL_225] (rows=1 width=0)
- predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
- TableScan [TS_46] (rows=1 width=0)
- default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
- <-Map 23 [SIMPLE_EDGE]
- SHUFFLE [RS_59]
- PartitionCols:_col0
- Select Operator [SEL_51] (rows=231000 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col5"]
- Filter Operator [FIL_226] (rows=231000 width=1436)
- predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
- TableScan [TS_49] (rows=462000 width=1436)
- default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
- <-Reducer 4 [CONTAINS]
- Reduce Output Operator [RS_71]
- PartitionCols:_col0, _col1, _col2, _col3, _col4
- Group By Operator [GBY_70] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:_col0, _col1, _col2, _col3, _col4
- Select Operator [SEL_68] (rows=922383 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Select Operator [SEL_21] (rows=307461 width=1436)
+ Select Operator [SEL_67] (rows=307461 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Merge Join Operator [MERGEJOIN_249] (rows=307461 width=1436)
+ Conds:RS_64._col2, _col1=RS_65._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
+ <-Map 25 [SIMPLE_EDGE]
+ SHUFFLE [RS_65]
+ PartitionCols:_col1, _col0
+ Select Operator [SEL_57] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3"]
+ Filter Operator [FIL_228] (rows=1 width=0)
+ predicate:wr_item_sk is not null
+ TableScan [TS_55] (rows=1 width=0)
+ default@web_returns,web_returns,Tbl:PARTIAL,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
+ <-Reducer 21 [SIMPLE_EDGE]
+ SHUFFLE [RS_64]
+ PartitionCols:_col2, _col1
+ Merge Join Operator [MERGEJOIN_248] (rows=279510 width=1436)
+ Conds:RS_61._col0=RS_62._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 24 [SIMPLE_EDGE]
+ SHUFFLE [RS_62]
+ PartitionCols:_col0
+ Select Operator [SEL_54] (rows=36524 width=1119)
+ Output:["_col0"]
+ Filter Operator [FIL_227] (rows=36524 width=1119)
+ predicate:((d_year = 2002) and d_date_sk is not null)
+ TableScan [TS_52] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+ <-Reducer 20 [SIMPLE_EDGE]
+ SHUFFLE [RS_61]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_247] (rows=254100 width=1436)
+ Conds:RS_58._col1=RS_59._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 19 [SIMPLE_EDGE]
+ SHUFFLE [RS_58]
+ PartitionCols:_col1
+ Select Operator [SEL_48] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4"]
+ Filter Operator [FIL_225] (rows=1 width=0)
+ predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
+ TableScan [TS_46] (rows=1 width=0)
+ default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
+ <-Map 23 [SIMPLE_EDGE]
+ SHUFFLE [RS_59]
+ PartitionCols:_col0
+ Select Operator [SEL_51] (rows=231000 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col5"]
+ Filter Operator [FIL_226] (rows=231000 width=1436)
+ predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
+ TableScan [TS_49] (rows=462000 width=1436)
+ default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+ <-Reducer 4 [CONTAINS]
+ Reduce Output Operator [RS_71]
+ PartitionCols:2002, _col1, _col2, _col3, _col4
+ Group By Operator [GBY_70] (rows=922383 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2002, _col1, _col2, _col3, _col4
+ Select Operator [SEL_68] (rows=922383 width=1436)
Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
- Merge Join Operator [MERGEJOIN_243] (rows=307461 width=1436)
- Conds:RS_18._col2, _col1=RS_19._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
- <-Map 11 [SIMPLE_EDGE]
- SHUFFLE [RS_19]
- PartitionCols:_col1, _col0
- Select Operator [SEL_11] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3"]
- Filter Operator [FIL_220] (rows=1 width=0)
- predicate:cr_item_sk is not null
- TableScan [TS_9] (rows=1 width=0)
- default@catalog_returns,catalog_returns,Tbl:PARTIAL,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
- <-Reducer 3 [SIMPLE_EDGE]
- SHUFFLE [RS_18]
- PartitionCols:_col2, _col1
- Merge Join Operator [MERGEJOIN_242] (rows=279510 width=1436)
- Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 10 [SIMPLE_EDGE]
- SHUFFLE [RS_16]
- PartitionCols:_col0
- Select Operator [SEL_8] (rows=36524 width=1119)
- Output:["_col0"]
- Filter Operator [FIL_219] (rows=36524 width=1119)
- predicate:((d_year = 2002) and d_date_sk is not null)
- TableScan [TS_6] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
- <-Reducer 2 [SIMPLE_EDGE]
- SHUFFLE [RS_15]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_241] (rows=254100 width=1436)
- Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
- <-Map 1 [SIMPLE_EDGE]
- SHUFFLE [RS_12]
- PartitionCols:_col1
- Select Operator [SEL_2] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4"]
- Filter Operator [FIL_217] (rows=1 width=0)
- predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
- TableScan [TS_0] (rows=1 width=0)
- default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
- <-Map 9 [SIMPLE_EDGE]
- SHUFFLE [RS_13]
- PartitionCols:_col0
- Select Operator [SEL_5] (rows=231000 width=1436)
- Output:["_col0","_col1","_col2","_col3","_col5"]
- Filter Operator [FIL_218] (rows=231000 width=1436)
- predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
- TableScan [TS_3] (rows=462000 width=1436)
- default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+ Select Operator [SEL_21] (rows=307461 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Merge Join Operator [MERGEJOIN_243] (rows=307461 width=1436)
+ Conds:RS_18._col2, _col1=RS_19._col1, _col0(Left Outer),Output:["_col3","_col4","_col6","_col7","_col8","_col10","_col15","_col16"]
+ <-Map 11 [SIMPLE_EDGE]
+ SHUFFLE [RS_19]
+ PartitionCols:_col1, _col0
+ Select Operator [SEL_11] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3"]
+ Filter Operator [FIL_220] (rows=1 width=0)
+ predicate:cr_item_sk is not null
+ TableScan [TS_9] (rows=1 width=0)
+ default@catalog_returns,catalog_returns,Tbl:PARTIAL,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
+ <-Reducer 3 [SIMPLE_EDGE]
+ SHUFFLE [RS_18]
+ PartitionCols:_col2, _col1
+ Merge Join Operator [MERGEJOIN_242] (rows=279510 width=1436)
+ Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 10 [SIMPLE_EDGE]
+ SHUFFLE [RS_16]
+ PartitionCols:_col0
+ Select Operator [SEL_8] (rows=36524 width=1119)
+ Output:["_col0"]
+ Filter Operator [FIL_219] (rows=36524 width=1119)
+ predicate:((d_year = 2002) and d_date_sk is not null)
+ TableScan [TS_6] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+ <-Reducer 2 [SIMPLE_EDGE]
+ SHUFFLE [RS_15]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_241] (rows=254100 width=1436)
+ Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col10"]
+ <-Map 1 [SIMPLE_EDGE]
+ SHUFFLE [RS_12]
+ PartitionCols:_col1
+ Select Operator [SEL_2] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4"]
+ Filter Operator [FIL_217] (rows=1 width=0)
+ predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
+ TableScan [TS_0] (rows=1 width=0)
+ default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
+ <-Map 9 [SIMPLE_EDGE]
+ SHUFFLE [RS_13]
+ PartitionCols:_col0
+ Select Operator [SEL_5] (rows=231000 width=1436)
+ Output:["_col0","_col1","_col2","_col3","_col5"]
+ Filter Operator [FIL_218] (rows=231000 width=1436)
+ predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
+ TableScan [TS_3] (rows=462000 width=1436)
+ default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
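For orientation: the three [CONTAINS] branches above are the arms of a UNION ALL over store, web and catalog sales, each inner-joined to date_dim (d_year = 2002) and item (i_category = 'Sports') and left-outer-joined to the matching returns table. A hedged sketch of the store arm, reconstructed from the scans, join conditions and aggregations in the plan (the exact expressions behind the two SUMs are an assumption, in the usual TPC-DS style):

  SELECT d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
         SUM(ss_quantity - COALESCE(sr_return_quantity, 0)) AS sales_cnt,   -- assumed expression
         SUM(ss_ext_sales_price - COALESCE(sr_return_amt, 0)) AS sales_amt  -- assumed expression
  FROM store_sales
  JOIN date_dim ON ss_sold_date_sk = d_date_sk
  JOIN item ON ss_item_sk = i_item_sk
  LEFT OUTER JOIN store_returns
    ON ss_ticket_number = sr_ticket_number AND ss_item_sk = sr_item_sk
  WHERE d_year = 2002 AND i_category = 'Sports'
  GROUP BY d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id;

The web_sales/web_returns and catalog_sales/catalog_returns arms are analogous. The net effect of the diff is that the folded constant 2002 is now used directly as a partition and group-by key (keys:2002, _col1, ...) instead of being threaded through an extra Select Operator as _col0.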
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
index 190bea5..eb95cad 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
@@ -458,21 +458,17 @@ STAGE PLANS:
Number of rows: 1000
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
- outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
- outputColumnNames: _col0, _col1, _col2, _col3
+ expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.outputtbl3
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.outputtbl3
Reducer 4
Reduce Operator Tree:
Select Operator
@@ -483,21 +479,17 @@ STAGE PLANS:
Number of rows: 1000
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
- outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
- outputColumnNames: _col0, _col1, _col2, _col3
+ expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.outputtbl3
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.outputtbl3
Stage: Stage-0
Move Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/spark/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_view.q.out b/ql/src/test/results/clientpositive/spark/union_view.q.out
index 402d9fd..3372afb 100644
--- a/ql/src/test/results/clientpositive/spark/union_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_view.q.out
@@ -272,10 +272,10 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '1' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -296,10 +296,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '1' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -320,10 +320,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '1' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -360,10 +360,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '2' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '2' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -384,10 +384,10 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '2' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '2' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -408,10 +408,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '2' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '2' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -448,10 +448,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '3' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -472,10 +472,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '3' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -496,10 +496,10 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '3' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -538,10 +538,10 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string), ds (type: string)
- outputColumnNames: _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: string), _col2 (type: string)
+ expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col1, _col2
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -560,10 +560,10 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string), ds (type: string)
- outputColumnNames: _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: string), _col2 (type: string)
+ expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col1, _col2
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -582,10 +582,10 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string), ds (type: string)
- outputColumnNames: _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: string), _col2 (type: string)
+ expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col1, _col2
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -931,10 +931,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '4' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '4' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -955,10 +955,10 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '4' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '4' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -979,10 +979,10 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '4' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '4' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
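The union_view.q.out hunks show the same column-pruning pattern: the inner projection now emits value as _col0 rather than _col1 before the constants are substituted. A hedged reconstruction of the query shape behind these plans (the view name follows the test's naming and is an assumption):

  SELECT key, value, ds FROM src_union_view WHERE key = 86 AND ds = '1';

With key = 86 and ds = '1' folded to the literals 86 and '1', only value survives as a scanned column, so it naturally lands in _col0.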
[06/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
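The user-facing surface of HIVE-13351 is a DROP CONSTRAINT clause on ALTER TABLE. A minimal, hedged usage sketch (the table and constraint names are hypothetical):

  ALTER TABLE orders DROP CONSTRAINT pk_orders;   -- drops a previously declared PK or FK constraint

The generated Thrift code below carries the same three coordinates (dbname, tablename, constraintname) as required fields of the new DropConstraintRequest struct; its read() rejects any request that leaves one of them unset.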
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 8da883d..36a0f96 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -8999,6 +8999,138 @@ void ForeignKeysResponse::printTo(std::ostream& out) const {
}
+DropConstraintRequest::~DropConstraintRequest() throw() {
+}
+
+
+void DropConstraintRequest::__set_dbname(const std::string& val) {
+ this->dbname = val;
+}
+
+void DropConstraintRequest::__set_tablename(const std::string& val) {
+ this->tablename = val;
+}
+
+void DropConstraintRequest::__set_constraintname(const std::string& val) {
+ this->constraintname = val;
+}
+
+uint32_t DropConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_dbname = false;
+ bool isset_tablename = false;
+ bool isset_constraintname = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->dbname);
+ isset_dbname = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->tablename);
+ isset_tablename = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->constraintname);
+ isset_constraintname = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_dbname)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_tablename)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_constraintname)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t DropConstraintRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("DropConstraintRequest");
+
+ xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->dbname);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->tablename);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldBegin("constraintname", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->constraintname);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+void swap(DropConstraintRequest &a, DropConstraintRequest &b) {
+ using ::std::swap;
+ swap(a.dbname, b.dbname);
+ swap(a.tablename, b.tablename);
+ swap(a.constraintname, b.constraintname);
+}
+
+DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other377) {
+ dbname = other377.dbname;
+ tablename = other377.tablename;
+ constraintname = other377.constraintname;
+}
+DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other378) {
+ dbname = other378.dbname;
+ tablename = other378.tablename;
+ constraintname = other378.constraintname;
+ return *this;
+}
+void DropConstraintRequest::printTo(std::ostream& out) const {
+ using ::apache::thrift::to_string;
+ out << "DropConstraintRequest(";
+ out << "dbname=" << to_string(dbname);
+ out << ", " << "tablename=" << to_string(tablename);
+ out << ", " << "constraintname=" << to_string(constraintname);
+ out << ")";
+}
+
+
PartitionsByExprResult::~PartitionsByExprResult() throw() {
}
@@ -9038,14 +9170,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size377;
- ::apache::thrift::protocol::TType _etype380;
- xfer += iprot->readListBegin(_etype380, _size377);
- this->partitions.resize(_size377);
- uint32_t _i381;
- for (_i381 = 0; _i381 < _size377; ++_i381)
+ uint32_t _size379;
+ ::apache::thrift::protocol::TType _etype382;
+ xfer += iprot->readListBegin(_etype382, _size379);
+ this->partitions.resize(_size379);
+ uint32_t _i383;
+ for (_i383 = 0; _i383 < _size379; ++_i383)
{
- xfer += this->partitions[_i381].read(iprot);
+ xfer += this->partitions[_i383].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9086,10 +9218,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter382;
- for (_iter382 = this->partitions.begin(); _iter382 != this->partitions.end(); ++_iter382)
+ std::vector<Partition> ::const_iterator _iter384;
+ for (_iter384 = this->partitions.begin(); _iter384 != this->partitions.end(); ++_iter384)
{
- xfer += (*_iter382).write(oprot);
+ xfer += (*_iter384).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9110,13 +9242,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) {
swap(a.hasUnknownPartitions, b.hasUnknownPartitions);
}
-PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other383) {
- partitions = other383.partitions;
- hasUnknownPartitions = other383.hasUnknownPartitions;
+PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other385) {
+ partitions = other385.partitions;
+ hasUnknownPartitions = other385.hasUnknownPartitions;
}
-PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other384) {
- partitions = other384.partitions;
- hasUnknownPartitions = other384.hasUnknownPartitions;
+PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other386) {
+ partitions = other386.partitions;
+ hasUnknownPartitions = other386.hasUnknownPartitions;
return *this;
}
void PartitionsByExprResult::printTo(std::ostream& out) const {
@@ -9278,21 +9410,21 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) {
swap(a.__isset, b.__isset);
}
-PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other385) {
- dbName = other385.dbName;
- tblName = other385.tblName;
- expr = other385.expr;
- defaultPartitionName = other385.defaultPartitionName;
- maxParts = other385.maxParts;
- __isset = other385.__isset;
-}
-PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other386) {
- dbName = other386.dbName;
- tblName = other386.tblName;
- expr = other386.expr;
- defaultPartitionName = other386.defaultPartitionName;
- maxParts = other386.maxParts;
- __isset = other386.__isset;
+PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other387) {
+ dbName = other387.dbName;
+ tblName = other387.tblName;
+ expr = other387.expr;
+ defaultPartitionName = other387.defaultPartitionName;
+ maxParts = other387.maxParts;
+ __isset = other387.__isset;
+}
+PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other388) {
+ dbName = other388.dbName;
+ tblName = other388.tblName;
+ expr = other388.expr;
+ defaultPartitionName = other388.defaultPartitionName;
+ maxParts = other388.maxParts;
+ __isset = other388.__isset;
return *this;
}
void PartitionsByExprRequest::printTo(std::ostream& out) const {
@@ -9341,14 +9473,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tableStats.clear();
- uint32_t _size387;
- ::apache::thrift::protocol::TType _etype390;
- xfer += iprot->readListBegin(_etype390, _size387);
- this->tableStats.resize(_size387);
- uint32_t _i391;
- for (_i391 = 0; _i391 < _size387; ++_i391)
+ uint32_t _size389;
+ ::apache::thrift::protocol::TType _etype392;
+ xfer += iprot->readListBegin(_etype392, _size389);
+ this->tableStats.resize(_size389);
+ uint32_t _i393;
+ for (_i393 = 0; _i393 < _size389; ++_i393)
{
- xfer += this->tableStats[_i391].read(iprot);
+ xfer += this->tableStats[_i393].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9379,10 +9511,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tableStats.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter392;
- for (_iter392 = this->tableStats.begin(); _iter392 != this->tableStats.end(); ++_iter392)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter394;
+ for (_iter394 = this->tableStats.begin(); _iter394 != this->tableStats.end(); ++_iter394)
{
- xfer += (*_iter392).write(oprot);
+ xfer += (*_iter394).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9398,11 +9530,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) {
swap(a.tableStats, b.tableStats);
}
-TableStatsResult::TableStatsResult(const TableStatsResult& other393) {
- tableStats = other393.tableStats;
+TableStatsResult::TableStatsResult(const TableStatsResult& other395) {
+ tableStats = other395.tableStats;
}
-TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other394) {
- tableStats = other394.tableStats;
+TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other396) {
+ tableStats = other396.tableStats;
return *this;
}
void TableStatsResult::printTo(std::ostream& out) const {
@@ -9447,26 +9579,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->partStats.clear();
- uint32_t _size395;
- ::apache::thrift::protocol::TType _ktype396;
- ::apache::thrift::protocol::TType _vtype397;
- xfer += iprot->readMapBegin(_ktype396, _vtype397, _size395);
- uint32_t _i399;
- for (_i399 = 0; _i399 < _size395; ++_i399)
+ uint32_t _size397;
+ ::apache::thrift::protocol::TType _ktype398;
+ ::apache::thrift::protocol::TType _vtype399;
+ xfer += iprot->readMapBegin(_ktype398, _vtype399, _size397);
+ uint32_t _i401;
+ for (_i401 = 0; _i401 < _size397; ++_i401)
{
- std::string _key400;
- xfer += iprot->readString(_key400);
- std::vector<ColumnStatisticsObj> & _val401 = this->partStats[_key400];
+ std::string _key402;
+ xfer += iprot->readString(_key402);
+ std::vector<ColumnStatisticsObj> & _val403 = this->partStats[_key402];
{
- _val401.clear();
- uint32_t _size402;
- ::apache::thrift::protocol::TType _etype405;
- xfer += iprot->readListBegin(_etype405, _size402);
- _val401.resize(_size402);
- uint32_t _i406;
- for (_i406 = 0; _i406 < _size402; ++_i406)
+ _val403.clear();
+ uint32_t _size404;
+ ::apache::thrift::protocol::TType _etype407;
+ xfer += iprot->readListBegin(_etype407, _size404);
+ _val403.resize(_size404);
+ uint32_t _i408;
+ for (_i408 = 0; _i408 < _size404; ++_i408)
{
- xfer += _val401[_i406].read(iprot);
+ xfer += _val403[_i408].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9500,16 +9632,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr
xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->partStats.size()));
- std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter407;
- for (_iter407 = this->partStats.begin(); _iter407 != this->partStats.end(); ++_iter407)
+ std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter409;
+ for (_iter409 = this->partStats.begin(); _iter409 != this->partStats.end(); ++_iter409)
{
- xfer += oprot->writeString(_iter407->first);
+ xfer += oprot->writeString(_iter409->first);
{
- xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter407->second.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter408;
- for (_iter408 = _iter407->second.begin(); _iter408 != _iter407->second.end(); ++_iter408)
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter409->second.size()));
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter410;
+ for (_iter410 = _iter409->second.begin(); _iter410 != _iter409->second.end(); ++_iter410)
{
- xfer += (*_iter408).write(oprot);
+ xfer += (*_iter410).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9528,11 +9660,11 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) {
swap(a.partStats, b.partStats);
}
-PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other409) {
- partStats = other409.partStats;
+PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other411) {
+ partStats = other411.partStats;
}
-PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other410) {
- partStats = other410.partStats;
+PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other412) {
+ partStats = other412.partStats;
return *this;
}
void PartitionsStatsResult::printTo(std::ostream& out) const {
@@ -9603,14 +9735,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colNames.clear();
- uint32_t _size411;
- ::apache::thrift::protocol::TType _etype414;
- xfer += iprot->readListBegin(_etype414, _size411);
- this->colNames.resize(_size411);
- uint32_t _i415;
- for (_i415 = 0; _i415 < _size411; ++_i415)
+ uint32_t _size413;
+ ::apache::thrift::protocol::TType _etype416;
+ xfer += iprot->readListBegin(_etype416, _size413);
+ this->colNames.resize(_size413);
+ uint32_t _i417;
+ for (_i417 = 0; _i417 < _size413; ++_i417)
{
- xfer += iprot->readString(this->colNames[_i415]);
+ xfer += iprot->readString(this->colNames[_i417]);
}
xfer += iprot->readListEnd();
}
@@ -9653,10 +9785,10 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->colNames.size()));
- std::vector<std::string> ::const_iterator _iter416;
- for (_iter416 = this->colNames.begin(); _iter416 != this->colNames.end(); ++_iter416)
+ std::vector<std::string> ::const_iterator _iter418;
+ for (_iter418 = this->colNames.begin(); _iter418 != this->colNames.end(); ++_iter418)
{
- xfer += oprot->writeString((*_iter416));
+ xfer += oprot->writeString((*_iter418));
}
xfer += oprot->writeListEnd();
}
@@ -9674,15 +9806,15 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) {
swap(a.colNames, b.colNames);
}
-TableStatsRequest::TableStatsRequest(const TableStatsRequest& other417) {
- dbName = other417.dbName;
- tblName = other417.tblName;
- colNames = other417.colNames;
+TableStatsRequest::TableStatsRequest(const TableStatsRequest& other419) {
+ dbName = other419.dbName;
+ tblName = other419.tblName;
+ colNames = other419.colNames;
}
-TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other418) {
- dbName = other418.dbName;
- tblName = other418.tblName;
- colNames = other418.colNames;
+TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other420) {
+ dbName = other420.dbName;
+ tblName = other420.tblName;
+ colNames = other420.colNames;
return *this;
}
void TableStatsRequest::printTo(std::ostream& out) const {
@@ -9760,14 +9892,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colNames.clear();
- uint32_t _size419;
- ::apache::thrift::protocol::TType _etype422;
- xfer += iprot->readListBegin(_etype422, _size419);
- this->colNames.resize(_size419);
- uint32_t _i423;
- for (_i423 = 0; _i423 < _size419; ++_i423)
+ uint32_t _size421;
+ ::apache::thrift::protocol::TType _etype424;
+ xfer += iprot->readListBegin(_etype424, _size421);
+ this->colNames.resize(_size421);
+ uint32_t _i425;
+ for (_i425 = 0; _i425 < _size421; ++_i425)
{
- xfer += iprot->readString(this->colNames[_i423]);
+ xfer += iprot->readString(this->colNames[_i425]);
}
xfer += iprot->readListEnd();
}
@@ -9780,14 +9912,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partNames.clear();
- uint32_t _size424;
- ::apache::thrift::protocol::TType _etype427;
- xfer += iprot->readListBegin(_etype427, _size424);
- this->partNames.resize(_size424);
- uint32_t _i428;
- for (_i428 = 0; _i428 < _size424; ++_i428)
+ uint32_t _size426;
+ ::apache::thrift::protocol::TType _etype429;
+ xfer += iprot->readListBegin(_etype429, _size426);
+ this->partNames.resize(_size426);
+ uint32_t _i430;
+ for (_i430 = 0; _i430 < _size426; ++_i430)
{
- xfer += iprot->readString(this->partNames[_i428]);
+ xfer += iprot->readString(this->partNames[_i430]);
}
xfer += iprot->readListEnd();
}
@@ -9832,10 +9964,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op
xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->colNames.size()));
- std::vector<std::string> ::const_iterator _iter429;
- for (_iter429 = this->colNames.begin(); _iter429 != this->colNames.end(); ++_iter429)
+ std::vector<std::string> ::const_iterator _iter431;
+ for (_iter431 = this->colNames.begin(); _iter431 != this->colNames.end(); ++_iter431)
{
- xfer += oprot->writeString((*_iter429));
+ xfer += oprot->writeString((*_iter431));
}
xfer += oprot->writeListEnd();
}
@@ -9844,10 +9976,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
- std::vector<std::string> ::const_iterator _iter430;
- for (_iter430 = this->partNames.begin(); _iter430 != this->partNames.end(); ++_iter430)
+ std::vector<std::string> ::const_iterator _iter432;
+ for (_iter432 = this->partNames.begin(); _iter432 != this->partNames.end(); ++_iter432)
{
- xfer += oprot->writeString((*_iter430));
+ xfer += oprot->writeString((*_iter432));
}
xfer += oprot->writeListEnd();
}
@@ -9866,17 +9998,17 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) {
swap(a.partNames, b.partNames);
}
-PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other431) {
- dbName = other431.dbName;
- tblName = other431.tblName;
- colNames = other431.colNames;
- partNames = other431.partNames;
+PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other433) {
+ dbName = other433.dbName;
+ tblName = other433.tblName;
+ colNames = other433.colNames;
+ partNames = other433.partNames;
}
-PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other432) {
- dbName = other432.dbName;
- tblName = other432.tblName;
- colNames = other432.colNames;
- partNames = other432.partNames;
+PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other434) {
+ dbName = other434.dbName;
+ tblName = other434.tblName;
+ colNames = other434.colNames;
+ partNames = other434.partNames;
return *this;
}
void PartitionsStatsRequest::printTo(std::ostream& out) const {
@@ -9924,14 +10056,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size433;
- ::apache::thrift::protocol::TType _etype436;
- xfer += iprot->readListBegin(_etype436, _size433);
- this->partitions.resize(_size433);
- uint32_t _i437;
- for (_i437 = 0; _i437 < _size433; ++_i437)
+ uint32_t _size435;
+ ::apache::thrift::protocol::TType _etype438;
+ xfer += iprot->readListBegin(_etype438, _size435);
+ this->partitions.resize(_size435);
+ uint32_t _i439;
+ for (_i439 = 0; _i439 < _size435; ++_i439)
{
- xfer += this->partitions[_i437].read(iprot);
+ xfer += this->partitions[_i439].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9961,10 +10093,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter438;
- for (_iter438 = this->partitions.begin(); _iter438 != this->partitions.end(); ++_iter438)
+ std::vector<Partition> ::const_iterator _iter440;
+ for (_iter440 = this->partitions.begin(); _iter440 != this->partitions.end(); ++_iter440)
{
- xfer += (*_iter438).write(oprot);
+ xfer += (*_iter440).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9981,13 +10113,13 @@ void swap(AddPartitionsResult &a, AddPartitionsResult &b) {
swap(a.__isset, b.__isset);
}
-AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other439) {
- partitions = other439.partitions;
- __isset = other439.__isset;
+AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other441) {
+ partitions = other441.partitions;
+ __isset = other441.__isset;
}
-AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other440) {
- partitions = other440.partitions;
- __isset = other440.__isset;
+AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other442) {
+ partitions = other442.partitions;
+ __isset = other442.__isset;
return *this;
}
void AddPartitionsResult::printTo(std::ostream& out) const {
@@ -10068,14 +10200,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->parts.clear();
- uint32_t _size441;
- ::apache::thrift::protocol::TType _etype444;
- xfer += iprot->readListBegin(_etype444, _size441);
- this->parts.resize(_size441);
- uint32_t _i445;
- for (_i445 = 0; _i445 < _size441; ++_i445)
+ uint32_t _size443;
+ ::apache::thrift::protocol::TType _etype446;
+ xfer += iprot->readListBegin(_etype446, _size443);
+ this->parts.resize(_size443);
+ uint32_t _i447;
+ for (_i447 = 0; _i447 < _size443; ++_i447)
{
- xfer += this->parts[_i445].read(iprot);
+ xfer += this->parts[_i447].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10136,10 +10268,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro
xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->parts.size()));
- std::vector<Partition> ::const_iterator _iter446;
- for (_iter446 = this->parts.begin(); _iter446 != this->parts.end(); ++_iter446)
+ std::vector<Partition> ::const_iterator _iter448;
+ for (_iter448 = this->parts.begin(); _iter448 != this->parts.end(); ++_iter448)
{
- xfer += (*_iter446).write(oprot);
+ xfer += (*_iter448).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10169,21 +10301,21 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) {
swap(a.__isset, b.__isset);
}
-AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other447) {
- dbName = other447.dbName;
- tblName = other447.tblName;
- parts = other447.parts;
- ifNotExists = other447.ifNotExists;
- needResult = other447.needResult;
- __isset = other447.__isset;
-}
-AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other448) {
- dbName = other448.dbName;
- tblName = other448.tblName;
- parts = other448.parts;
- ifNotExists = other448.ifNotExists;
- needResult = other448.needResult;
- __isset = other448.__isset;
+AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other449) {
+ dbName = other449.dbName;
+ tblName = other449.tblName;
+ parts = other449.parts;
+ ifNotExists = other449.ifNotExists;
+ needResult = other449.needResult;
+ __isset = other449.__isset;
+}
+AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other450) {
+ dbName = other450.dbName;
+ tblName = other450.tblName;
+ parts = other450.parts;
+ ifNotExists = other450.ifNotExists;
+ needResult = other450.needResult;
+ __isset = other450.__isset;
return *this;
}
void AddPartitionsRequest::printTo(std::ostream& out) const {
@@ -10232,14 +10364,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size449;
- ::apache::thrift::protocol::TType _etype452;
- xfer += iprot->readListBegin(_etype452, _size449);
- this->partitions.resize(_size449);
- uint32_t _i453;
- for (_i453 = 0; _i453 < _size449; ++_i453)
+ uint32_t _size451;
+ ::apache::thrift::protocol::TType _etype454;
+ xfer += iprot->readListBegin(_etype454, _size451);
+ this->partitions.resize(_size451);
+ uint32_t _i455;
+ for (_i455 = 0; _i455 < _size451; ++_i455)
{
- xfer += this->partitions[_i453].read(iprot);
+ xfer += this->partitions[_i455].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10269,10 +10401,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter454;
- for (_iter454 = this->partitions.begin(); _iter454 != this->partitions.end(); ++_iter454)
+ std::vector<Partition> ::const_iterator _iter456;
+ for (_iter456 = this->partitions.begin(); _iter456 != this->partitions.end(); ++_iter456)
{
- xfer += (*_iter454).write(oprot);
+ xfer += (*_iter456).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10289,13 +10421,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) {
swap(a.__isset, b.__isset);
}
-DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other455) {
- partitions = other455.partitions;
- __isset = other455.__isset;
+DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other457) {
+ partitions = other457.partitions;
+ __isset = other457.__isset;
}
-DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other456) {
- partitions = other456.partitions;
- __isset = other456.__isset;
+DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other458) {
+ partitions = other458.partitions;
+ __isset = other458.__isset;
return *this;
}
void DropPartitionsResult::printTo(std::ostream& out) const {
@@ -10397,15 +10529,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) {
swap(a.__isset, b.__isset);
}
-DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other457) {
- expr = other457.expr;
- partArchiveLevel = other457.partArchiveLevel;
- __isset = other457.__isset;
+DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other459) {
+ expr = other459.expr;
+ partArchiveLevel = other459.partArchiveLevel;
+ __isset = other459.__isset;
}
-DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other458) {
- expr = other458.expr;
- partArchiveLevel = other458.partArchiveLevel;
- __isset = other458.__isset;
+DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other460) {
+ expr = other460.expr;
+ partArchiveLevel = other460.partArchiveLevel;
+ __isset = other460.__isset;
return *this;
}
void DropPartitionsExpr::printTo(std::ostream& out) const {
@@ -10454,14 +10586,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->names.clear();
- uint32_t _size459;
- ::apache::thrift::protocol::TType _etype462;
- xfer += iprot->readListBegin(_etype462, _size459);
- this->names.resize(_size459);
- uint32_t _i463;
- for (_i463 = 0; _i463 < _size459; ++_i463)
+ uint32_t _size461;
+ ::apache::thrift::protocol::TType _etype464;
+ xfer += iprot->readListBegin(_etype464, _size461);
+ this->names.resize(_size461);
+ uint32_t _i465;
+ for (_i465 = 0; _i465 < _size461; ++_i465)
{
- xfer += iprot->readString(this->names[_i463]);
+ xfer += iprot->readString(this->names[_i465]);
}
xfer += iprot->readListEnd();
}
@@ -10474,14 +10606,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->exprs.clear();
- uint32_t _size464;
- ::apache::thrift::protocol::TType _etype467;
- xfer += iprot->readListBegin(_etype467, _size464);
- this->exprs.resize(_size464);
- uint32_t _i468;
- for (_i468 = 0; _i468 < _size464; ++_i468)
+ uint32_t _size466;
+ ::apache::thrift::protocol::TType _etype469;
+ xfer += iprot->readListBegin(_etype469, _size466);
+ this->exprs.resize(_size466);
+ uint32_t _i470;
+ for (_i470 = 0; _i470 < _size466; ++_i470)
{
- xfer += this->exprs[_i468].read(iprot);
+ xfer += this->exprs[_i470].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10510,10 +10642,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size()));
- std::vector<std::string> ::const_iterator _iter469;
- for (_iter469 = this->names.begin(); _iter469 != this->names.end(); ++_iter469)
+ std::vector<std::string> ::const_iterator _iter471;
+ for (_iter471 = this->names.begin(); _iter471 != this->names.end(); ++_iter471)
{
- xfer += oprot->writeString((*_iter469));
+ xfer += oprot->writeString((*_iter471));
}
xfer += oprot->writeListEnd();
}
@@ -10522,10 +10654,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->exprs.size()));
- std::vector<DropPartitionsExpr> ::const_iterator _iter470;
- for (_iter470 = this->exprs.begin(); _iter470 != this->exprs.end(); ++_iter470)
+ std::vector<DropPartitionsExpr> ::const_iterator _iter472;
+ for (_iter472 = this->exprs.begin(); _iter472 != this->exprs.end(); ++_iter472)
{
- xfer += (*_iter470).write(oprot);
+ xfer += (*_iter472).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10543,15 +10675,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) {
swap(a.__isset, b.__isset);
}
-RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other471) {
- names = other471.names;
- exprs = other471.exprs;
- __isset = other471.__isset;
+RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other473) {
+ names = other473.names;
+ exprs = other473.exprs;
+ __isset = other473.__isset;
}
-RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other472) {
- names = other472.names;
- exprs = other472.exprs;
- __isset = other472.__isset;
+RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other474) {
+ names = other474.names;
+ exprs = other474.exprs;
+ __isset = other474.__isset;
return *this;
}
void RequestPartsSpec::printTo(std::ostream& out) const {
@@ -10770,27 +10902,27 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) {
swap(a.__isset, b.__isset);
}
-DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other473) {
- dbName = other473.dbName;
- tblName = other473.tblName;
- parts = other473.parts;
- deleteData = other473.deleteData;
- ifExists = other473.ifExists;
- ignoreProtection = other473.ignoreProtection;
- environmentContext = other473.environmentContext;
- needResult = other473.needResult;
- __isset = other473.__isset;
-}
-DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other474) {
- dbName = other474.dbName;
- tblName = other474.tblName;
- parts = other474.parts;
- deleteData = other474.deleteData;
- ifExists = other474.ifExists;
- ignoreProtection = other474.ignoreProtection;
- environmentContext = other474.environmentContext;
- needResult = other474.needResult;
- __isset = other474.__isset;
+DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other475) {
+ dbName = other475.dbName;
+ tblName = other475.tblName;
+ parts = other475.parts;
+ deleteData = other475.deleteData;
+ ifExists = other475.ifExists;
+ ignoreProtection = other475.ignoreProtection;
+ environmentContext = other475.environmentContext;
+ needResult = other475.needResult;
+ __isset = other475.__isset;
+}
+DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other476) {
+ dbName = other476.dbName;
+ tblName = other476.tblName;
+ parts = other476.parts;
+ deleteData = other476.deleteData;
+ ifExists = other476.ifExists;
+ ignoreProtection = other476.ignoreProtection;
+ environmentContext = other476.environmentContext;
+ needResult = other476.needResult;
+ __isset = other476.__isset;
return *this;
}
void DropPartitionsRequest::printTo(std::ostream& out) const {
@@ -10843,9 +10975,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) {
{
case 1:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast475;
- xfer += iprot->readI32(ecast475);
- this->resourceType = (ResourceType::type)ecast475;
+ int32_t ecast477;
+ xfer += iprot->readI32(ecast477);
+ this->resourceType = (ResourceType::type)ecast477;
this->__isset.resourceType = true;
} else {
xfer += iprot->skip(ftype);
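
[The ecastNNN temporaries in hunks like the one above exist because Thrift encodes enum values as i32 on the wire: the generated reader pulls an int32_t and then casts it to the enum type. A hedged stand-alone sketch — FakeProtocol and ResourceTypeLike are invented stand-ins, and the real call is iprot->readI32(ecast):]

#include <cstdint>
#include <cassert>

namespace demo {
enum struct ResourceTypeLike : int { JAR = 1, FILE = 2, ARCHIVE = 3 };

// Stand-in reader (illustrative): always yields 2, i.e. FILE.
struct FakeProtocol {
  uint32_t readI32(int32_t& out) { out = 2; return 4; }
};

uint32_t readResourceType(FakeProtocol* iprot, ResourceTypeLike& field) {
  uint32_t xfer = 0;
  int32_t ecast477;                    // enums travel as i32 on the wire
  xfer += iprot->readI32(ecast477);
  field = (ResourceTypeLike)ecast477;  // generator uses a C-style cast
  return xfer;
}
}  // namespace demo

int main() {
  demo::FakeProtocol p;
  demo::ResourceTypeLike t;
  demo::readResourceType(&p, t);
  assert(t == demo::ResourceTypeLike::FILE);
}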
@@ -10896,15 +11028,15 @@ void swap(ResourceUri &a, ResourceUri &b) {
swap(a.__isset, b.__isset);
}
-ResourceUri::ResourceUri(const ResourceUri& other476) {
- resourceType = other476.resourceType;
- uri = other476.uri;
- __isset = other476.__isset;
+ResourceUri::ResourceUri(const ResourceUri& other478) {
+ resourceType = other478.resourceType;
+ uri = other478.uri;
+ __isset = other478.__isset;
}
-ResourceUri& ResourceUri::operator=(const ResourceUri& other477) {
- resourceType = other477.resourceType;
- uri = other477.uri;
- __isset = other477.__isset;
+ResourceUri& ResourceUri::operator=(const ResourceUri& other479) {
+ resourceType = other479.resourceType;
+ uri = other479.uri;
+ __isset = other479.__isset;
return *this;
}
void ResourceUri::printTo(std::ostream& out) const {
@@ -11007,9 +11139,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 5:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast478;
- xfer += iprot->readI32(ecast478);
- this->ownerType = (PrincipalType::type)ecast478;
+ int32_t ecast480;
+ xfer += iprot->readI32(ecast480);
+ this->ownerType = (PrincipalType::type)ecast480;
this->__isset.ownerType = true;
} else {
xfer += iprot->skip(ftype);
@@ -11025,9 +11157,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 7:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast479;
- xfer += iprot->readI32(ecast479);
- this->functionType = (FunctionType::type)ecast479;
+ int32_t ecast481;
+ xfer += iprot->readI32(ecast481);
+ this->functionType = (FunctionType::type)ecast481;
this->__isset.functionType = true;
} else {
xfer += iprot->skip(ftype);
@@ -11037,14 +11169,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->resourceUris.clear();
- uint32_t _size480;
- ::apache::thrift::protocol::TType _etype483;
- xfer += iprot->readListBegin(_etype483, _size480);
- this->resourceUris.resize(_size480);
- uint32_t _i484;
- for (_i484 = 0; _i484 < _size480; ++_i484)
+ uint32_t _size482;
+ ::apache::thrift::protocol::TType _etype485;
+ xfer += iprot->readListBegin(_etype485, _size482);
+ this->resourceUris.resize(_size482);
+ uint32_t _i486;
+ for (_i486 = 0; _i486 < _size482; ++_i486)
{
- xfer += this->resourceUris[_i484].read(iprot);
+ xfer += this->resourceUris[_i486].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11101,10 +11233,10 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourceUris.size()));
- std::vector<ResourceUri> ::const_iterator _iter485;
- for (_iter485 = this->resourceUris.begin(); _iter485 != this->resourceUris.end(); ++_iter485)
+ std::vector<ResourceUri> ::const_iterator _iter487;
+ for (_iter487 = this->resourceUris.begin(); _iter487 != this->resourceUris.end(); ++_iter487)
{
- xfer += (*_iter485).write(oprot);
+ xfer += (*_iter487).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11128,27 +11260,27 @@ void swap(Function &a, Function &b) {
swap(a.__isset, b.__isset);
}
-Function::Function(const Function& other486) {
- functionName = other486.functionName;
- dbName = other486.dbName;
- className = other486.className;
- ownerName = other486.ownerName;
- ownerType = other486.ownerType;
- createTime = other486.createTime;
- functionType = other486.functionType;
- resourceUris = other486.resourceUris;
- __isset = other486.__isset;
-}
-Function& Function::operator=(const Function& other487) {
- functionName = other487.functionName;
- dbName = other487.dbName;
- className = other487.className;
- ownerName = other487.ownerName;
- ownerType = other487.ownerType;
- createTime = other487.createTime;
- functionType = other487.functionType;
- resourceUris = other487.resourceUris;
- __isset = other487.__isset;
+Function::Function(const Function& other488) {
+ functionName = other488.functionName;
+ dbName = other488.dbName;
+ className = other488.className;
+ ownerName = other488.ownerName;
+ ownerType = other488.ownerType;
+ createTime = other488.createTime;
+ functionType = other488.functionType;
+ resourceUris = other488.resourceUris;
+ __isset = other488.__isset;
+}
+Function& Function::operator=(const Function& other489) {
+ functionName = other489.functionName;
+ dbName = other489.dbName;
+ className = other489.className;
+ ownerName = other489.ownerName;
+ ownerType = other489.ownerType;
+ createTime = other489.createTime;
+ functionType = other489.functionType;
+ resourceUris = other489.resourceUris;
+ __isset = other489.__isset;
return *this;
}
void Function::printTo(std::ostream& out) const {
@@ -11236,9 +11368,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast488;
- xfer += iprot->readI32(ecast488);
- this->state = (TxnState::type)ecast488;
+ int32_t ecast490;
+ xfer += iprot->readI32(ecast490);
+ this->state = (TxnState::type)ecast490;
isset_state = true;
} else {
xfer += iprot->skip(ftype);
@@ -11357,25 +11489,25 @@ void swap(TxnInfo &a, TxnInfo &b) {
swap(a.__isset, b.__isset);
}
-TxnInfo::TxnInfo(const TxnInfo& other489) {
- id = other489.id;
- state = other489.state;
- user = other489.user;
- hostname = other489.hostname;
- agentInfo = other489.agentInfo;
- heartbeatCount = other489.heartbeatCount;
- metaInfo = other489.metaInfo;
- __isset = other489.__isset;
-}
-TxnInfo& TxnInfo::operator=(const TxnInfo& other490) {
- id = other490.id;
- state = other490.state;
- user = other490.user;
- hostname = other490.hostname;
- agentInfo = other490.agentInfo;
- heartbeatCount = other490.heartbeatCount;
- metaInfo = other490.metaInfo;
- __isset = other490.__isset;
+TxnInfo::TxnInfo(const TxnInfo& other491) {
+ id = other491.id;
+ state = other491.state;
+ user = other491.user;
+ hostname = other491.hostname;
+ agentInfo = other491.agentInfo;
+ heartbeatCount = other491.heartbeatCount;
+ metaInfo = other491.metaInfo;
+ __isset = other491.__isset;
+}
+TxnInfo& TxnInfo::operator=(const TxnInfo& other492) {
+ id = other492.id;
+ state = other492.state;
+ user = other492.user;
+ hostname = other492.hostname;
+ agentInfo = other492.agentInfo;
+ heartbeatCount = other492.heartbeatCount;
+ metaInfo = other492.metaInfo;
+ __isset = other492.__isset;
return *this;
}
void TxnInfo::printTo(std::ostream& out) const {
@@ -11439,14 +11571,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->open_txns.clear();
- uint32_t _size491;
- ::apache::thrift::protocol::TType _etype494;
- xfer += iprot->readListBegin(_etype494, _size491);
- this->open_txns.resize(_size491);
- uint32_t _i495;
- for (_i495 = 0; _i495 < _size491; ++_i495)
+ uint32_t _size493;
+ ::apache::thrift::protocol::TType _etype496;
+ xfer += iprot->readListBegin(_etype496, _size493);
+ this->open_txns.resize(_size493);
+ uint32_t _i497;
+ for (_i497 = 0; _i497 < _size493; ++_i497)
{
- xfer += this->open_txns[_i495].read(iprot);
+ xfer += this->open_txns[_i497].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11483,10 +11615,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o
xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->open_txns.size()));
- std::vector<TxnInfo> ::const_iterator _iter496;
- for (_iter496 = this->open_txns.begin(); _iter496 != this->open_txns.end(); ++_iter496)
+ std::vector<TxnInfo> ::const_iterator _iter498;
+ for (_iter498 = this->open_txns.begin(); _iter498 != this->open_txns.end(); ++_iter498)
{
- xfer += (*_iter496).write(oprot);
+ xfer += (*_iter498).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11503,13 +11635,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) {
swap(a.open_txns, b.open_txns);
}
-GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other497) {
- txn_high_water_mark = other497.txn_high_water_mark;
- open_txns = other497.open_txns;
+GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other499) {
+ txn_high_water_mark = other499.txn_high_water_mark;
+ open_txns = other499.open_txns;
}
-GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other498) {
- txn_high_water_mark = other498.txn_high_water_mark;
- open_txns = other498.open_txns;
+GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other500) {
+ txn_high_water_mark = other500.txn_high_water_mark;
+ open_txns = other500.open_txns;
return *this;
}
void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const {
@@ -11568,15 +11700,15 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_SET) {
{
this->open_txns.clear();
- uint32_t _size499;
- ::apache::thrift::protocol::TType _etype502;
- xfer += iprot->readSetBegin(_etype502, _size499);
- uint32_t _i503;
- for (_i503 = 0; _i503 < _size499; ++_i503)
+ uint32_t _size501;
+ ::apache::thrift::protocol::TType _etype504;
+ xfer += iprot->readSetBegin(_etype504, _size501);
+ uint32_t _i505;
+ for (_i505 = 0; _i505 < _size501; ++_i505)
{
- int64_t _elem504;
- xfer += iprot->readI64(_elem504);
- this->open_txns.insert(_elem504);
+ int64_t _elem506;
+ xfer += iprot->readI64(_elem506);
+ this->open_txns.insert(_elem506);
}
xfer += iprot->readSetEnd();
}
@@ -11613,10 +11745,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_SET, 2);
{
xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->open_txns.size()));
- std::set<int64_t> ::const_iterator _iter505;
- for (_iter505 = this->open_txns.begin(); _iter505 != this->open_txns.end(); ++_iter505)
+ std::set<int64_t> ::const_iterator _iter507;
+ for (_iter507 = this->open_txns.begin(); _iter507 != this->open_txns.end(); ++_iter507)
{
- xfer += oprot->writeI64((*_iter505));
+ xfer += oprot->writeI64((*_iter507));
}
xfer += oprot->writeSetEnd();
}
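
[Set-valued fields like open_txns differ from list fields in one way visible in these hunks: std::set has no resize(), so the generator reads each element into a local (_elem504 and its renumbered successors) and inserts it. A minimal sketch, again with an invented FakeProtocol whose readSetBegin drops the TType argument the real API takes:]

#include <cstdint>
#include <set>
#include <iostream>

namespace demo {
struct FakeProtocol {
  uint32_t readSetBegin(uint32_t& size) { size = 2; return 1; }
  uint32_t readI64(int64_t& v) {
    static int64_t next = 100;  // fake monotonically increasing txn ids
    v = ++next;
    return 8;
  }
  uint32_t readSetEnd() { return 0; }
};

// Read-into-local-then-insert, mirroring the generated set-read shape.
uint32_t readOpenTxns(FakeProtocol* iprot, std::set<int64_t>& open_txns) {
  uint32_t xfer = 0, _size499 = 0;
  open_txns.clear();
  xfer += iprot->readSetBegin(_size499);
  for (uint32_t _i503 = 0; _i503 < _size499; ++_i503) {
    int64_t _elem504;
    xfer += iprot->readI64(_elem504);
    open_txns.insert(_elem504);
  }
  xfer += iprot->readSetEnd();
  return xfer;
}
}  // namespace demo

int main() {
  demo::FakeProtocol p;
  std::set<int64_t> txns;
  demo::readOpenTxns(&p, txns);
  std::cout << txns.size() << " open txns\n";
}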
@@ -11633,13 +11765,13 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) {
swap(a.open_txns, b.open_txns);
}
-GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other506) {
- txn_high_water_mark = other506.txn_high_water_mark;
- open_txns = other506.open_txns;
+GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other508) {
+ txn_high_water_mark = other508.txn_high_water_mark;
+ open_txns = other508.open_txns;
}
-GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other507) {
- txn_high_water_mark = other507.txn_high_water_mark;
- open_txns = other507.open_txns;
+GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other509) {
+ txn_high_water_mark = other509.txn_high_water_mark;
+ open_txns = other509.open_txns;
return *this;
}
void GetOpenTxnsResponse::printTo(std::ostream& out) const {
@@ -11782,19 +11914,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) {
swap(a.__isset, b.__isset);
}
-OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other508) {
- num_txns = other508.num_txns;
- user = other508.user;
- hostname = other508.hostname;
- agentInfo = other508.agentInfo;
- __isset = other508.__isset;
+OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other510) {
+ num_txns = other510.num_txns;
+ user = other510.user;
+ hostname = other510.hostname;
+ agentInfo = other510.agentInfo;
+ __isset = other510.__isset;
}
-OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other509) {
- num_txns = other509.num_txns;
- user = other509.user;
- hostname = other509.hostname;
- agentInfo = other509.agentInfo;
- __isset = other509.__isset;
+OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other511) {
+ num_txns = other511.num_txns;
+ user = other511.user;
+ hostname = other511.hostname;
+ agentInfo = other511.agentInfo;
+ __isset = other511.__isset;
return *this;
}
void OpenTxnRequest::printTo(std::ostream& out) const {
@@ -11842,14 +11974,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->txn_ids.clear();
- uint32_t _size510;
- ::apache::thrift::protocol::TType _etype513;
- xfer += iprot->readListBegin(_etype513, _size510);
- this->txn_ids.resize(_size510);
- uint32_t _i514;
- for (_i514 = 0; _i514 < _size510; ++_i514)
+ uint32_t _size512;
+ ::apache::thrift::protocol::TType _etype515;
+ xfer += iprot->readListBegin(_etype515, _size512);
+ this->txn_ids.resize(_size512);
+ uint32_t _i516;
+ for (_i516 = 0; _i516 < _size512; ++_i516)
{
- xfer += iprot->readI64(this->txn_ids[_i514]);
+ xfer += iprot->readI64(this->txn_ids[_i516]);
}
xfer += iprot->readListEnd();
}
@@ -11880,10 +12012,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size()));
- std::vector<int64_t> ::const_iterator _iter515;
- for (_iter515 = this->txn_ids.begin(); _iter515 != this->txn_ids.end(); ++_iter515)
+ std::vector<int64_t> ::const_iterator _iter517;
+ for (_iter517 = this->txn_ids.begin(); _iter517 != this->txn_ids.end(); ++_iter517)
{
- xfer += oprot->writeI64((*_iter515));
+ xfer += oprot->writeI64((*_iter517));
}
xfer += oprot->writeListEnd();
}
@@ -11899,11 +12031,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) {
swap(a.txn_ids, b.txn_ids);
}
-OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other516) {
- txn_ids = other516.txn_ids;
+OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other518) {
+ txn_ids = other518.txn_ids;
}
-OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other517) {
- txn_ids = other517.txn_ids;
+OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other519) {
+ txn_ids = other519.txn_ids;
return *this;
}
void OpenTxnsResponse::printTo(std::ostream& out) const {
@@ -11985,11 +12117,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) {
swap(a.txnid, b.txnid);
}
-AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other518) {
- txnid = other518.txnid;
+AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other520) {
+ txnid = other520.txnid;
}
-AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other519) {
- txnid = other519.txnid;
+AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other521) {
+ txnid = other521.txnid;
return *this;
}
void AbortTxnRequest::printTo(std::ostream& out) const {
@@ -12071,11 +12203,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) {
swap(a.txnid, b.txnid);
}
-CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other520) {
- txnid = other520.txnid;
+CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other522) {
+ txnid = other522.txnid;
}
-CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other521) {
- txnid = other521.txnid;
+CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other523) {
+ txnid = other523.txnid;
return *this;
}
void CommitTxnRequest::printTo(std::ostream& out) const {
@@ -12138,9 +12270,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
{
case 1:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast522;
- xfer += iprot->readI32(ecast522);
- this->type = (LockType::type)ecast522;
+ int32_t ecast524;
+ xfer += iprot->readI32(ecast524);
+ this->type = (LockType::type)ecast524;
isset_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -12148,9 +12280,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast523;
- xfer += iprot->readI32(ecast523);
- this->level = (LockLevel::type)ecast523;
+ int32_t ecast525;
+ xfer += iprot->readI32(ecast525);
+ this->level = (LockLevel::type)ecast525;
isset_level = true;
} else {
xfer += iprot->skip(ftype);
@@ -12240,21 +12372,21 @@ void swap(LockComponent &a, LockComponent &b) {
swap(a.__isset, b.__isset);
}
-LockComponent::LockComponent(const LockComponent& other524) {
- type = other524.type;
- level = other524.level;
- dbname = other524.dbname;
- tablename = other524.tablename;
- partitionname = other524.partitionname;
- __isset = other524.__isset;
-}
-LockComponent& LockComponent::operator=(const LockComponent& other525) {
- type = other525.type;
- level = other525.level;
- dbname = other525.dbname;
- tablename = other525.tablename;
- partitionname = other525.partitionname;
- __isset = other525.__isset;
+LockComponent::LockComponent(const LockComponent& other526) {
+ type = other526.type;
+ level = other526.level;
+ dbname = other526.dbname;
+ tablename = other526.tablename;
+ partitionname = other526.partitionname;
+ __isset = other526.__isset;
+}
+LockComponent& LockComponent::operator=(const LockComponent& other527) {
+ type = other527.type;
+ level = other527.level;
+ dbname = other527.dbname;
+ tablename = other527.tablename;
+ partitionname = other527.partitionname;
+ __isset = other527.__isset;
return *this;
}
void LockComponent::printTo(std::ostream& out) const {
@@ -12323,14 +12455,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->component.clear();
- uint32_t _size526;
- ::apache::thrift::protocol::TType _etype529;
- xfer += iprot->readListBegin(_etype529, _size526);
- this->component.resize(_size526);
- uint32_t _i530;
- for (_i530 = 0; _i530 < _size526; ++_i530)
+ uint32_t _size528;
+ ::apache::thrift::protocol::TType _etype531;
+ xfer += iprot->readListBegin(_etype531, _size528);
+ this->component.resize(_size528);
+ uint32_t _i532;
+ for (_i532 = 0; _i532 < _size528; ++_i532)
{
- xfer += this->component[_i530].read(iprot);
+ xfer += this->component[_i532].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -12397,10 +12529,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const
xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->component.size()));
- std::vector<LockComponent> ::const_iterator _iter531;
- for (_iter531 = this->component.begin(); _iter531 != this->component.end(); ++_iter531)
+ std::vector<LockComponent> ::const_iterator _iter533;
+ for (_iter533 = this->component.begin(); _iter533 != this->component.end(); ++_iter533)
{
- xfer += (*_iter531).write(oprot);
+ xfer += (*_iter533).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -12439,21 +12571,21 @@ void swap(LockRequest &a, LockRequest &b) {
swap(a.__isset, b.__isset);
}
-LockRequest::LockRequest(const LockRequest& other532) {
- component = other532.component;
- txnid = other532.txnid;
- user = other532.user;
- hostname = other532.hostname;
- agentInfo = other532.agentInfo;
- __isset = other532.__isset;
-}
-LockRequest& LockRequest::operator=(const LockRequest& other533) {
- component = other533.component;
- txnid = other533.txnid;
- user = other533.user;
- hostname = other533.hostname;
- agentInfo = other533.agentInfo;
- __isset = other533.__isset;
+LockRequest::LockRequest(const LockRequest& other534) {
+ component = other534.component;
+ txnid = other534.txnid;
+ user = other534.user;
+ hostname = other534.hostname;
+ agentInfo = other534.agentInfo;
+ __isset = other534.__isset;
+}
+LockRequest& LockRequest::operator=(const LockRequest& other535) {
+ component = other535.component;
+ txnid = other535.txnid;
+ user = other535.user;
+ hostname = other535.hostname;
+ agentInfo = other535.agentInfo;
+ __isset = other535.__isset;
return *this;
}
void LockRequest::printTo(std::ostream& out) const {
@@ -12513,9 +12645,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast534;
- xfer += iprot->readI32(ecast534);
- this->state = (LockState::type)ecast534;
+ int32_t ecast536;
+ xfer += iprot->readI32(ecast536);
+ this->state = (LockState::type)ecast536;
isset_state = true;
} else {
xfer += iprot->skip(ftype);
@@ -12561,13 +12693,13 @@ void swap(LockResponse &a, LockResponse &b) {
swap(a.state, b.state);
}
-LockResponse::LockResponse(const LockResponse& other535) {
- lockid = other535.lockid;
- state = other535.state;
+LockResponse::LockResponse(const LockResponse& other537) {
+ lockid = other537.lockid;
+ state = other537.state;
}
-LockResponse& LockResponse::operator=(const LockResponse& other536) {
- lockid = other536.lockid;
- state = other536.state;
+LockResponse& LockResponse::operator=(const LockResponse& other538) {
+ lockid = other538.lockid;
+ state = other538.state;
return *this;
}
void LockResponse::printTo(std::ostream& out) const {
@@ -12689,17 +12821,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) {
swap(a.__isset, b.__isset);
}
-CheckLockRequest::CheckLockRequest(const CheckLockRequest& other537) {
- lockid = other537.lockid;
- txnid = other537.txnid;
- elapsed_ms = other537.elapsed_ms;
- __isset = other537.__isset;
+CheckLockRequest::CheckLockRequest(const CheckLockRequest& other539) {
+ lockid = other539.lockid;
+ txnid = other539.txnid;
+ elapsed_ms = other539.elapsed_ms;
+ __isset = other539.__isset;
}
-CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other538) {
- lockid = other538.lockid;
- txnid = other538.txnid;
- elapsed_ms = other538.elapsed_ms;
- __isset = other538.__isset;
+CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other540) {
+ lockid = other540.lockid;
+ txnid = other540.txnid;
+ elapsed_ms = other540.elapsed_ms;
+ __isset = other540.__isset;
return *this;
}
void CheckLockRequest::printTo(std::ostream& out) const {
@@ -12783,11 +12915,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) {
swap(a.lockid, b.lockid);
}
-UnlockRequest::UnlockRequest(const UnlockRequest& other539) {
- lockid = other539.lockid;
+UnlockRequest::UnlockRequest(const UnlockRequest& other541) {
+ lockid = other541.lockid;
}
-UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other540) {
- lockid = other540.lockid;
+UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other542) {
+ lockid = other542.lockid;
return *this;
}
void UnlockRequest::printTo(std::ostream& out) const {
@@ -12926,19 +13058,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) {
swap(a.__isset, b.__isset);
}
-ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other541) {
- dbname = other541.dbname;
- tablename = other541.tablename;
- partname = other541.partname;
- isExtended = other541.isExtended;
- __isset = other541.__isset;
+ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other543) {
+ dbname = other543.dbname;
+ tablename = other543.tablename;
+ partname = other543.partname;
+ isExtended = other543.isExtended;
+ __isset = other543.__isset;
}
-ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other542) {
- dbname = other542.dbname;
- tablename = other542.tablename;
- partname = other542.partname;
- isExtended = other542.isExtended;
- __isset = other542.__isset;
+ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other544) {
+ dbname = other544.dbname;
+ tablename = other544.tablename;
+ partname = other544.partname;
+ isExtended = other544.isExtended;
+ __isset = other544.__isset;
return *this;
}
void ShowLocksRequest::printTo(std::ostream& out) const {
@@ -13091,9 +13223,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i
break;
case 5:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast543;
- xfer += iprot->readI32(ecast543);
- this->state = (LockState::type)ecast543;
+ int32_t ecast545;
+ xfer += iprot->readI32(ecast545);
+ this->state = (LockState::type)ecast545;
isset_state = true;
} else {
xfer += iprot->skip(ftype);
@@ -13101,9 +13233,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i
break;
case 6:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast544;
- xfer += iprot->readI32(ecast544);
- this->type = (LockType::type)ecast544;
+ int32_t ecast546;
+ xfer += iprot->readI32(ecast546);
+ this->type = (LockType::type)ecast546;
isset_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -13319,43 +13451,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) {
swap(a.__isset, b.__isset);
}
-ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other545) {
- lockid = other545.lockid;
- dbname = other545.dbname;
- tablename = other545.tablename;
- partname = other545.partname;
- state = other545.state;
- type = other545.type;
- txnid = other545.txnid;
- lastheartbeat = other545.lastheartbeat;
- acquiredat = other545.acquiredat;
- user = other545.user;
- hostname = other545.hostname;
- heartbeatCount = other545.heartbeatCount;
- agentInfo = other545.agentInfo;
- blockedByExtId = other545.blockedByExtId;
- blockedByIntId = other545.blockedByIntId;
- lockIdInternal = other545.lockIdInternal;
- __isset = other545.__isset;
-}
-ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other546) {
- lockid = other546.lockid;
- dbname = other546.dbname;
- tablename = other546.tablename;
- partname = other546.partname;
- state = other546.state;
- type = other546.type;
- txnid = other546.txnid;
- lastheartbeat = other546.lastheartbeat;
- acquiredat = other546.acquiredat;
- user = other546.user;
- hostname = other546.hostname;
- heartbeatCount = other546.heartbeatCount;
- agentInfo = other546.agentInfo;
- blockedByExtId = other546.blockedByExtId;
- blockedByIntId = other546.blockedByIntId;
- lockIdInternal = other546.lockIdInternal;
- __isset = other546.__isset;
+ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other547) {
+ lockid = other547.lockid;
+ dbname = other547.dbname;
+ tablename = other547.tablename;
+ partname = other547.partname;
+ state = other547.state;
+ type = other547.type;
+ txnid = other547.txnid;
+ lastheartbeat = other547.lastheartbeat;
+ acquiredat = other547.acquiredat;
+ user = other547.user;
+ hostname = other547.hostname;
+ heartbeatCount = other547.heartbeatCount;
+ agentInfo = other547.agentInfo;
+ blockedByExtId = other547.blockedByExtId;
+ blockedByIntId = other547.blockedByIntId;
+ lockIdInternal = other547.lockIdInternal;
+ __isset = other547.__isset;
+}
+ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other548) {
+ lockid = other548.lockid;
+ dbname = other548.dbname;
+ tablename = other548.tablename;
+ partname = other548.partname;
+ state = other548.state;
+ type = other548.type;
+ txnid = other548.txnid;
+ lastheartbeat = other548.lastheartbeat;
+ acquiredat = other548.acquiredat;
+ user = other548.user;
+ hostname = other548.hostname;
+ heartbeatCount = other548.heartbeatCount;
+ agentInfo = other548.agentInfo;
+ blockedByExtId = other548.blockedByExtId;
+ blockedByIntId = other548.blockedByIntId;
+ lockIdInternal = other548.lockIdInternal;
+ __isset = other548.__isset;
return *this;
}
void ShowLocksResponseElement::printTo(std::ostream& out) const {
@@ -13414,14 +13546,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->locks.clear();
- uint32_t _size547;
- ::apache::thrift::protocol::TType _etype550;
- xfer += iprot->readListBegin(_etype550, _size547);
- this->locks.resize(_size547);
- uint32_t _i551;
- for (_i551 = 0; _i551 < _size547; ++_i551)
+ uint32_t _size549;
+ ::apache::thrift::protocol::TType _etype552;
+ xfer += iprot->readListBegin(_etype552, _size549);
+ this->locks.resize(_size549);
+ uint32_t _i553;
+ for (_i553 = 0; _i553 < _size549; ++_i553)
{
- xfer += this->locks[_i551].read(iprot);
+ xfer += this->locks[_i553].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -13450,10 +13582,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->locks.size()));
- std::vector<ShowLocksResponseElement> ::const_iterator _iter552;
- for (_iter552 = this->locks.begin(); _iter552 != this->locks.end(); ++_iter552)
+ std::vector<ShowLocksResponseElement> ::const_iterator _iter554;
+ for (_iter554 = this->locks.begin(); _iter554 != this->locks.end(); ++_iter554)
{
- xfer += (*_iter552).write(oprot);
+ xfer += (*_iter554).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13470,13 +13602,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) {
swap(a.__isset, b.__isset);
}
-ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other553) {
- locks = other553.locks;
- __isset = other553.__isset;
+ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other555) {
+ locks = other555.locks;
+ __isset = other555.__isset;
}
-ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other554) {
- locks = other554.locks;
- __isset = other554.__isset;
+ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other556) {
+ locks = other556.locks;
+ __isset = other556.__isset;
return *this;
}
void ShowLocksResponse::printTo(std::ostream& out) const {
@@ -13577,15 +13709,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) {
swap(a.__isset, b.__isset);
}
-HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other555) {
- lockid = other555.lockid;
- txnid = other555.txnid;
- __isset = other555.__isset;
+HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other557) {
+ lockid = other557.lockid;
+ txnid = other557.txnid;
+ __isset = other557.__isset;
}
-HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other556) {
- lockid = other556.lockid;
- txnid = other556.txnid;
- __isset = other556.__isset;
+HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other558) {
+ lockid = other558.lockid;
+ txnid = other558.txnid;
+ __isset = other558.__isset;
return *this;
}
void HeartbeatRequest::printTo(std::ostream& out) const {
@@ -13688,13 +13820,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) {
swap(a.max, b.max);
}
-HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other557) {
- min = other557.min;
- max = other557.max;
+HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other559) {
+ min = other559.min;
+ max = other559.max;
}
-HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other558) {
- min = other558.min;
- max = other558.max;
+HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other560) {
+ min = other560.min;
+ max = other560.max;
return *this;
}
void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const {
@@ -13745,15 +13877,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_SET) {
{
this->aborted.clear();
- uint32_t _size559;
- ::apache::thrift::protocol::TType _etype562;
- xfer += iprot->readSetBegin(_etype562, _size559);
- uint32_t _i563;
- for (_i563 = 0; _i563 < _size559; ++_i563)
+ uint32_t _size561;
+ ::apache::thrift::protocol::TType _etype564;
+ xfer += iprot->readSetBegin(_etype564, _size561);
+ uint32_t _i565;
+ for (_i565 = 0; _i565 < _size561; ++_i565)
{
- int64_t _elem564;
- xfer += iprot->readI64(_elem564);
- this->aborted.insert(_elem564);
+ int64_t _elem566;
+ xfer += iprot->readI64(_elem566);
+ this->aborted.insert(_elem566);
}
xfer += iprot->readSetEnd();
}
@@ -13766,15 +13898,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_SET) {
{
this->nosuch.clear();
- uint32_t _size565;
- ::apache::thrift::protocol::TType _etype568;
- xfer += iprot->readSetBegin(_etype568, _size565);
- uint32_t _i569;
- for (_i569 = 0; _i569 < _size565; ++_i569)
+ uint32_t _size567;
+ ::apache::thrift::protocol::TType _etype570;
+ xfer += iprot->readSetBegin(_etype570, _size567);
+ uint32_t _i571;
+ for (_i571 = 0; _i571 < _size567; ++_i571)
{
- int64_t _elem570;
- xfer += iprot->readI64(_elem570);
- this->nosuch.insert(_elem570);
+ int64_t _elem572;
+ xfer += iprot->readI64(_elem572);
+ this->nosuch.insert(_elem572);
}
xfer += iprot->readSetEnd();
}
@@ -13807,10 +13939,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1);
{
xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->aborted.size()));
- std::set<int64_t> ::const_iterator _iter571;
- for (_iter571 = this->aborted.begin(); _iter571 != this->aborted.end(); ++_iter571)
+ std::set<int64_t> ::const_iterator _iter573;
+ for (_iter573 = this->aborted.begin(); _iter573 != this->aborted.end(); ++_iter573)
{
- xfer += oprot->writeI64((*_iter571));
+ xfer += oprot->writeI64((*_iter573));
}
xfer += oprot->writeSetEnd();
}
@@ -13819,10 +13951,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2);
{
xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->nosuch.size()));
- std::set<int64_t> ::const_iterator _iter572;
- for (_iter572 = this->nosuch.begin(); _iter572 != this->nosuch.end(); ++_iter572)
+ std::set<int64_t> ::const_iterator _iter574;
+ for (_iter574 = this->nosuch.begin(); _iter574 != this->nosuch.end(); ++_iter574)
{
- xfer += oprot->writeI64((*_iter572));
+ xfer += oprot->writeI64((*_iter574));
}
xfer += oprot->writeSetEnd();
}
@@ -13839,13 +13971,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) {
swap(a.nosuch, b.nosuch);
}
-HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other573) {
- aborted = other573.aborted;
- nosuch = other573.nosuch;
+HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other575) {
+ aborted = other575.aborted;
+ nosuch = other575.nosuch;
}
-HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other574) {
- aborted = other574.aborted;
- nosuch = other574.nosuch;
+HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other576) {
+ aborted = other576.aborted;
+ nosuch = other576.nosuch;
return *this;
}
void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const {
@@ -13933,9 +14065,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 4:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast575;
- xfer += iprot->readI32(ecast575);
- this->type = (CompactionType::type)ecast575;
+ int32_t ecast577;
+ xfer += iprot->readI32(ecast577);
+ this->type = (CompactionType::type)ecast577;
isset_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -14009,21 +14141,21 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
swap(a.__isset, b.__isset);
}
-CompactionRequest::CompactionRequest(const CompactionRequest& other576) {
- dbname = other576.dbname;
- tablename = other576.tablename;
- partitionname = other576.partitionname;
- type = other576.type;
- runas = other576.runas;
- __isset = other576.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other577) {
- dbname = other577.dbname;
- tablename = other577.tablename;
- partitionname = other577.partitionname;
- type = other577.type;
- runas = other577.runas;
- __isset = other577.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other578) {
+ dbname = other578.dbname;
+ tablename = other578.tablename;
+ partitionname = other578.partitionname;
+ type = other578.type;
+ runas = other578.runas;
+ __isset = other578.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other579) {
+ dbname = other579.dbname;
+ tablename = other579.tablename;
+ partitionname = other579.partitionname;
+ type = other579.type;
+ runas = other579.runas;
+ __isset = other579.__isset;
return *this;
}
void CompactionRequest::printTo(std::ostream& out) const {
@@ -14086,11 +14218,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
(void) b;
}
-ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other578) {
- (void) other578;
+ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other580) {
+ (void) other580;
}
-ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other579) {
- (void) other579;
+ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other581) {
+ (void) other581;
return *this;
}
void ShowCompactRequest::printTo(std::ostream& out) const {
@@ -14211,9 +14343,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol*
break;
case 4:
if (ftype == ::apache::thrift::protocol::T_I32) {
- int32_t ecast580;
- xfer += iprot->readI32(ecast580);
- this->type = (CompactionType::type)ecast580;
+ int32_t ecast582;
+ xfer += iprot->readI32(ecast582);
+ this->type = (CompactionType::type)ecast582;
isset_type = true;
} else {
xfer += iprot->skip(ftype);
@@ -14386,35 +14518,35 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) {
swap(a.__isset, b.__isset);
}
-ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other581) {
- dbname = other581.dbname;
- tablename = other581.tablename;
- partitionname = other581.partitionname;
- type = other581.type;
- state = other581.state;
- workerid = other581.workerid;
- start = other581.start;
- runAs = other581.runAs;
- hightestTxnId = other581.hightestTxnId;
- metaInfo = other581.metaInfo;
- endTime = other581.endTime;
- hadoopJobId = other581.hadoopJobId;
- __isset = other581.__isset;
-}
-ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other582) {
- dbname = other582.dbname;
- tablename = other582.tablename;
- partitionname = other582.partitionname;
- type = other582.type;
- state = other582.state;
- workerid = other582.workerid;
- start = other582.start;
- runAs = other582.runAs;
- hightestTxnId = other582.hightestTxnId;
- metaInfo = other582.metaInfo;
- endTime = other582.endTime;
- hadoopJobId = other582.hadoopJobId;
- __isset = other582.__isset;
+ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other583) {
+ dbname = other583.dbname;
+ tablename = other583.tablename;
+ partitionname = other583.partitionname;
+ type = other583.type;
+ state = other583.state;
+ workerid = other583.workerid;
+ start = other583.start;
+ runAs = other583.runAs;
+ hightestTxnId = other583.hightestTxnId;
+ metaInfo = other583.metaInfo;
+ endTime = other583.endTime;
+ hadoopJobId = other583.hadoopJobId;
+ __isset = other583.__isset;
+}
+ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other584) {
+ dbname = other584.dbname;
+ tablename = other584.tablename;
+ partitionname = other584.partitionname;
+ type = other584.type;
+ state = other584.state;
+ workerid = other584.workerid;
+ start = other584.start;
+ runAs = other584.runAs;
+ hightestTxnId = other584.hightestTxnId;
+ metaInfo = other584.metaInfo;
+ endTime = other584.endTime;
+ hadoopJobId = other584.hadoopJobId;
+ __isset = other584.__isset;
return *this;
}
void ShowCompactResponseElement::printTo(std::ostream& out) const {
@@ -14470,14 +14602,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->compacts.clear();
- uint32_t _size583;
- ::apache::thrift::protocol::TType _etype586;
- xfer += iprot->readListBegin(_etype586, _size583);
- this->compacts.resize(_size583);
- uint32_t _i587;
- for (_i587 = 0; _i587 < _size583; ++_i587)
+ uint32_t _size585;
+ ::apache::thrift::protocol::TType _etype588;
+ xfer += iprot->readListBegin(_etype588, _size585);
+ this->compacts.resize(_size585);
+ uint32_t _i589;
+ for (_i589 = 0; _i589 < _size585; ++_i589)
{
- xfer += this->compacts[_i587].read(iprot);
+ xfer += this->compacts[_i589].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -14508,10 +14640,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->compacts.size()));
- std::vector<ShowCompactResponseElement> ::const_iterator _iter588;
- for (_iter588 = this->compacts.begin(); _iter588 != this->compacts.end(); ++_iter588)
+ std::vector<ShowCompactResponseElement> ::const_iterator _iter590;
+ for (_iter590 = this->compacts.begin(); _iter590 != this->compacts.end(); ++_iter590)
{
- xfer += (*_iter588).write(oprot);
+ xfer += (*_iter590).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -14527,11 +14659,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) {
swap(a.compacts, b.compacts);
}
-ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other589) {
- compacts = other589.compacts;
+ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other591) {
+ compacts = other591.compacts;
}
-ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other590) {
- compacts = other590.compacts;
+ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other592) {
+ compacts = other592.compacts;
return *this;
}
void ShowCompactResponse::printTo(std::ostream& out) const {
@@ -14615,14 +14747,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitionnames.clear();
- uint32_t _size591;
- ::apache::thrift::protocol::TType _etype594;
- xfer += iprot->readListBegin(_etype594, _size591);
- this->partitionnames.resize(_size591);
- uint32_t _i595;
- for (_i595 = 0; _i595 < _size591; ++_i595)
+ uint32_t _size593;
+ ::apache::thrift::protocol::TType _etype596;
+ xfer += iprot->readListBegin(_etype596, _size593);
+ this->partitionnames.resize(_size593);
+ uint32_t _i597;
+ for (_i597 = 0; _i597 < _size593; ++_i597)
{
- xfer += iprot->readString(this->partitionnames[_i595]);
+ xfer += iprot->readString(this->partitionnames[_i597]);
}
xfer += iprot->readListEnd();
}
@@ -14671,10 +14803,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro
xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionnames.size()));
- std::vector<std::string> ::const_iterator _iter596;
- for (_iter596 = this->partitionnames.begin(); _iter596 != this->partitionnames.end(); ++_iter596)
+ std::vector<std::string> ::const_iterator _iter598;
+ for (_iter598 = this->partitionnames.begin(); _iter598 != this->partitionnames.end(); ++_iter598)
{
- xfer += oprot->writeString((*_iter596));
+ xfer += oprot->writeString((*_iter598));
}
xfer += oprot->writeListEnd();
}
@@ -14693,17 +14825,17 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) {
swap(a.partitionnames, b.partitionnames);
}
-AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other597) {
- txnid = other597.txnid;
- dbname = other597.dbname;
- tablename = other597.tablename;
- partitionnames = other597.partitionnames;
+AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other599) {
+ txnid = other599.txnid;
+ dbname = other599.dbname;
+ tablename = other599.tablename;
+ partitionnames = other599.partitionnames;
}
-AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other598) {
- txnid = other598.txnid;
- dbname = other598.dbname;
- tablename = other598.tablename;
- partitionnames = other598.partitionnames;
+AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other600) {
+ txnid = other600.txnid;
+ dbname = other600.dbname;
+ tablename = other600.tablename;
+ partitionnames = other600.partitionnames;
return *this;
}
void AddDynamicPartitions::printTo(std::ostream& out) const {
@@ -14808,15 +14940,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) {
swap(a.__isset, b.__isset);
}
-NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other599) {
- lastEvent = other599.lastEvent;
- maxEvents = other599.maxEvents;
- __isset = other599.__isset;
+NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other601) {
+ lastEvent = other601.lastEvent;
+ maxEvents = other601.maxEvents;
+ __isset = other601.__isset;
}
-NotificationEventRequest& NotificationEventRequest::operator=(const No
<TRUNCATED>
[08/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 690c895..2734a1c 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size749;
- ::apache::thrift::protocol::TType _etype752;
- xfer += iprot->readListBegin(_etype752, _size749);
- this->success.resize(_size749);
- uint32_t _i753;
- for (_i753 = 0; _i753 < _size749; ++_i753)
+ uint32_t _size751;
+ ::apache::thrift::protocol::TType _etype754;
+ xfer += iprot->readListBegin(_etype754, _size751);
+ this->success.resize(_size751);
+ uint32_t _i755;
+ for (_i755 = 0; _i755 < _size751; ++_i755)
{
- xfer += iprot->readString(this->success[_i753]);
+ xfer += iprot->readString(this->success[_i755]);
}
xfer += iprot->readListEnd();
}
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter754;
- for (_iter754 = this->success.begin(); _iter754 != this->success.end(); ++_iter754)
+ std::vector<std::string> ::const_iterator _iter756;
+ for (_iter756 = this->success.begin(); _iter756 != this->success.end(); ++_iter756)
{
- xfer += oprot->writeString((*_iter754));
+ xfer += oprot->writeString((*_iter756));
}
xfer += oprot->writeListEnd();
}
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size755;
- ::apache::thrift::protocol::TType _etype758;
- xfer += iprot->readListBegin(_etype758, _size755);
- (*(this->success)).resize(_size755);
- uint32_t _i759;
- for (_i759 = 0; _i759 < _size755; ++_i759)
+ uint32_t _size757;
+ ::apache::thrift::protocol::TType _etype760;
+ xfer += iprot->readListBegin(_etype760, _size757);
+ (*(this->success)).resize(_size757);
+ uint32_t _i761;
+ for (_i761 = 0; _i761 < _size757; ++_i761)
{
- xfer += iprot->readString((*(this->success))[_i759]);
+ xfer += iprot->readString((*(this->success))[_i761]);
}
xfer += iprot->readListEnd();
}
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size760;
- ::apache::thrift::protocol::TType _etype763;
- xfer += iprot->readListBegin(_etype763, _size760);
- this->success.resize(_size760);
- uint32_t _i764;
- for (_i764 = 0; _i764 < _size760; ++_i764)
+ uint32_t _size762;
+ ::apache::thrift::protocol::TType _etype765;
+ xfer += iprot->readListBegin(_etype765, _size762);
+ this->success.resize(_size762);
+ uint32_t _i766;
+ for (_i766 = 0; _i766 < _size762; ++_i766)
{
- xfer += iprot->readString(this->success[_i764]);
+ xfer += iprot->readString(this->success[_i766]);
}
xfer += iprot->readListEnd();
}
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter765;
- for (_iter765 = this->success.begin(); _iter765 != this->success.end(); ++_iter765)
+ std::vector<std::string> ::const_iterator _iter767;
+ for (_iter767 = this->success.begin(); _iter767 != this->success.end(); ++_iter767)
{
- xfer += oprot->writeString((*_iter765));
+ xfer += oprot->writeString((*_iter767));
}
xfer += oprot->writeListEnd();
}
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size766;
- ::apache::thrift::protocol::TType _etype769;
- xfer += iprot->readListBegin(_etype769, _size766);
- (*(this->success)).resize(_size766);
- uint32_t _i770;
- for (_i770 = 0; _i770 < _size766; ++_i770)
+ uint32_t _size768;
+ ::apache::thrift::protocol::TType _etype771;
+ xfer += iprot->readListBegin(_etype771, _size768);
+ (*(this->success)).resize(_size768);
+ uint32_t _i772;
+ for (_i772 = 0; _i772 < _size768; ++_i772)
{
- xfer += iprot->readString((*(this->success))[_i770]);
+ xfer += iprot->readString((*(this->success))[_i772]);
}
xfer += iprot->readListEnd();
}
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size771;
- ::apache::thrift::protocol::TType _ktype772;
- ::apache::thrift::protocol::TType _vtype773;
- xfer += iprot->readMapBegin(_ktype772, _vtype773, _size771);
- uint32_t _i775;
- for (_i775 = 0; _i775 < _size771; ++_i775)
+ uint32_t _size773;
+ ::apache::thrift::protocol::TType _ktype774;
+ ::apache::thrift::protocol::TType _vtype775;
+ xfer += iprot->readMapBegin(_ktype774, _vtype775, _size773);
+ uint32_t _i777;
+ for (_i777 = 0; _i777 < _size773; ++_i777)
{
- std::string _key776;
- xfer += iprot->readString(_key776);
- Type& _val777 = this->success[_key776];
- xfer += _val777.read(iprot);
+ std::string _key778;
+ xfer += iprot->readString(_key778);
+ Type& _val779 = this->success[_key778];
+ xfer += _val779.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Type> ::const_iterator _iter778;
- for (_iter778 = this->success.begin(); _iter778 != this->success.end(); ++_iter778)
+ std::map<std::string, Type> ::const_iterator _iter780;
+ for (_iter780 = this->success.begin(); _iter780 != this->success.end(); ++_iter780)
{
- xfer += oprot->writeString(_iter778->first);
- xfer += _iter778->second.write(oprot);
+ xfer += oprot->writeString(_iter780->first);
+ xfer += _iter780->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
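
[Map-valued results such as get_type_all's success follow a third shape in these hunks: read a key, then let operator[] default-construct the value slot so the struct can deserialize in place (_key776/_val777 and their renumbered successors). A self-contained sketch with invented stand-ins (FakeProtocol, TypeLike); the real reader also receives key and value TTypes from readMapBegin, omitted here:]

#include <cstdint>
#include <map>
#include <string>
#include <iostream>

namespace demo {
struct TypeLike {
  uint32_t read(struct FakeProtocol*) { return 4; }  // pretend payload read
};

struct FakeProtocol {
  uint32_t readMapBegin(uint32_t& size) { size = 2; return 1; }
  uint32_t readString(std::string& s) {
    static int n = 0;
    s = "type" + std::to_string(++n);  // fake distinct keys
    return 5;
  }
  uint32_t readMapEnd() { return 0; }
};

// Key first, then deserialize the value in place via operator[].
uint32_t readTypeMap(FakeProtocol* iprot,
                     std::map<std::string, TypeLike>& success) {
  uint32_t xfer = 0, _size771 = 0;
  success.clear();
  xfer += iprot->readMapBegin(_size771);
  for (uint32_t _i775 = 0; _i775 < _size771; ++_i775) {
    std::string _key776;
    xfer += iprot->readString(_key776);
    TypeLike& _val777 = success[_key776];  // default-constructs the slot
    xfer += _val777.read(iprot);
  }
  xfer += iprot->readMapEnd();
  return xfer;
}
}  // namespace demo

int main() {
  demo::FakeProtocol p;
  std::map<std::string, demo::TypeLike> m;
  demo::readTypeMap(&p, m);
  std::cout << m.size() << " entries\n";
}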
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size779;
- ::apache::thrift::protocol::TType _ktype780;
- ::apache::thrift::protocol::TType _vtype781;
- xfer += iprot->readMapBegin(_ktype780, _vtype781, _size779);
- uint32_t _i783;
- for (_i783 = 0; _i783 < _size779; ++_i783)
+ uint32_t _size781;
+ ::apache::thrift::protocol::TType _ktype782;
+ ::apache::thrift::protocol::TType _vtype783;
+ xfer += iprot->readMapBegin(_ktype782, _vtype783, _size781);
+ uint32_t _i785;
+ for (_i785 = 0; _i785 < _size781; ++_i785)
{
- std::string _key784;
- xfer += iprot->readString(_key784);
- Type& _val785 = (*(this->success))[_key784];
- xfer += _val785.read(iprot);
+ std::string _key786;
+ xfer += iprot->readString(_key786);
+ Type& _val787 = (*(this->success))[_key786];
+ xfer += _val787.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size786;
- ::apache::thrift::protocol::TType _etype789;
- xfer += iprot->readListBegin(_etype789, _size786);
- this->success.resize(_size786);
- uint32_t _i790;
- for (_i790 = 0; _i790 < _size786; ++_i790)
+ uint32_t _size788;
+ ::apache::thrift::protocol::TType _etype791;
+ xfer += iprot->readListBegin(_etype791, _size788);
+ this->success.resize(_size788);
+ uint32_t _i792;
+ for (_i792 = 0; _i792 < _size788; ++_i792)
{
- xfer += this->success[_i790].read(iprot);
+ xfer += this->success[_i792].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter791;
- for (_iter791 = this->success.begin(); _iter791 != this->success.end(); ++_iter791)
+ std::vector<FieldSchema> ::const_iterator _iter793;
+ for (_iter793 = this->success.begin(); _iter793 != this->success.end(); ++_iter793)
{
- xfer += (*_iter791).write(oprot);
+ xfer += (*_iter793).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size792;
- ::apache::thrift::protocol::TType _etype795;
- xfer += iprot->readListBegin(_etype795, _size792);
- (*(this->success)).resize(_size792);
- uint32_t _i796;
- for (_i796 = 0; _i796 < _size792; ++_i796)
+ uint32_t _size794;
+ ::apache::thrift::protocol::TType _etype797;
+ xfer += iprot->readListBegin(_etype797, _size794);
+ (*(this->success)).resize(_size794);
+ uint32_t _i798;
+ for (_i798 = 0; _i798 < _size794; ++_i798)
{
- xfer += (*(this->success))[_i796].read(iprot);
+ xfer += (*(this->success))[_i798].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size797;
- ::apache::thrift::protocol::TType _etype800;
- xfer += iprot->readListBegin(_etype800, _size797);
- this->success.resize(_size797);
- uint32_t _i801;
- for (_i801 = 0; _i801 < _size797; ++_i801)
+ uint32_t _size799;
+ ::apache::thrift::protocol::TType _etype802;
+ xfer += iprot->readListBegin(_etype802, _size799);
+ this->success.resize(_size799);
+ uint32_t _i803;
+ for (_i803 = 0; _i803 < _size799; ++_i803)
{
- xfer += this->success[_i801].read(iprot);
+ xfer += this->success[_i803].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter802;
- for (_iter802 = this->success.begin(); _iter802 != this->success.end(); ++_iter802)
+ std::vector<FieldSchema> ::const_iterator _iter804;
+ for (_iter804 = this->success.begin(); _iter804 != this->success.end(); ++_iter804)
{
- xfer += (*_iter802).write(oprot);
+ xfer += (*_iter804).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size803;
- ::apache::thrift::protocol::TType _etype806;
- xfer += iprot->readListBegin(_etype806, _size803);
- (*(this->success)).resize(_size803);
- uint32_t _i807;
- for (_i807 = 0; _i807 < _size803; ++_i807)
+ uint32_t _size805;
+ ::apache::thrift::protocol::TType _etype808;
+ xfer += iprot->readListBegin(_etype808, _size805);
+ (*(this->success)).resize(_size805);
+ uint32_t _i809;
+ for (_i809 = 0; _i809 < _size805; ++_i809)
{
- xfer += (*(this->success))[_i807].read(iprot);
+ xfer += (*(this->success))[_i809].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size808;
- ::apache::thrift::protocol::TType _etype811;
- xfer += iprot->readListBegin(_etype811, _size808);
- this->success.resize(_size808);
- uint32_t _i812;
- for (_i812 = 0; _i812 < _size808; ++_i812)
+ uint32_t _size810;
+ ::apache::thrift::protocol::TType _etype813;
+ xfer += iprot->readListBegin(_etype813, _size810);
+ this->success.resize(_size810);
+ uint32_t _i814;
+ for (_i814 = 0; _i814 < _size810; ++_i814)
{
- xfer += this->success[_i812].read(iprot);
+ xfer += this->success[_i814].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter813;
- for (_iter813 = this->success.begin(); _iter813 != this->success.end(); ++_iter813)
+ std::vector<FieldSchema> ::const_iterator _iter815;
+ for (_iter815 = this->success.begin(); _iter815 != this->success.end(); ++_iter815)
{
- xfer += (*_iter813).write(oprot);
+ xfer += (*_iter815).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size814;
- ::apache::thrift::protocol::TType _etype817;
- xfer += iprot->readListBegin(_etype817, _size814);
- (*(this->success)).resize(_size814);
- uint32_t _i818;
- for (_i818 = 0; _i818 < _size814; ++_i818)
+ uint32_t _size816;
+ ::apache::thrift::protocol::TType _etype819;
+ xfer += iprot->readListBegin(_etype819, _size816);
+ (*(this->success)).resize(_size816);
+ uint32_t _i820;
+ for (_i820 = 0; _i820 < _size816; ++_i820)
{
- xfer += (*(this->success))[_i818].read(iprot);
+ xfer += (*(this->success))[_i820].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size819;
- ::apache::thrift::protocol::TType _etype822;
- xfer += iprot->readListBegin(_etype822, _size819);
- this->success.resize(_size819);
- uint32_t _i823;
- for (_i823 = 0; _i823 < _size819; ++_i823)
+ uint32_t _size821;
+ ::apache::thrift::protocol::TType _etype824;
+ xfer += iprot->readListBegin(_etype824, _size821);
+ this->success.resize(_size821);
+ uint32_t _i825;
+ for (_i825 = 0; _i825 < _size821; ++_i825)
{
- xfer += this->success[_i823].read(iprot);
+ xfer += this->success[_i825].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter824;
- for (_iter824 = this->success.begin(); _iter824 != this->success.end(); ++_iter824)
+ std::vector<FieldSchema> ::const_iterator _iter826;
+ for (_iter826 = this->success.begin(); _iter826 != this->success.end(); ++_iter826)
{
- xfer += (*_iter824).write(oprot);
+ xfer += (*_iter826).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size825;
- ::apache::thrift::protocol::TType _etype828;
- xfer += iprot->readListBegin(_etype828, _size825);
- (*(this->success)).resize(_size825);
- uint32_t _i829;
- for (_i829 = 0; _i829 < _size825; ++_i829)
+ uint32_t _size827;
+ ::apache::thrift::protocol::TType _etype830;
+ xfer += iprot->readListBegin(_etype830, _size827);
+ (*(this->success)).resize(_size827);
+ uint32_t _i831;
+ for (_i831 = 0; _i831 < _size827; ++_i831)
{
- xfer += (*(this->success))[_i829].read(iprot);
+ xfer += (*(this->success))[_i831].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size830;
- ::apache::thrift::protocol::TType _etype833;
- xfer += iprot->readListBegin(_etype833, _size830);
- this->primaryKeys.resize(_size830);
- uint32_t _i834;
- for (_i834 = 0; _i834 < _size830; ++_i834)
+ uint32_t _size832;
+ ::apache::thrift::protocol::TType _etype835;
+ xfer += iprot->readListBegin(_etype835, _size832);
+ this->primaryKeys.resize(_size832);
+ uint32_t _i836;
+ for (_i836 = 0; _i836 < _size832; ++_i836)
{
- xfer += this->primaryKeys[_i834].read(iprot);
+ xfer += this->primaryKeys[_i836].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size835;
- ::apache::thrift::protocol::TType _etype838;
- xfer += iprot->readListBegin(_etype838, _size835);
- this->foreignKeys.resize(_size835);
- uint32_t _i839;
- for (_i839 = 0; _i839 < _size835; ++_i839)
+ uint32_t _size837;
+ ::apache::thrift::protocol::TType _etype840;
+ xfer += iprot->readListBegin(_etype840, _size837);
+ this->foreignKeys.resize(_size837);
+ uint32_t _i841;
+ for (_i841 = 0; _i841 < _size837; ++_i841)
{
- xfer += this->foreignKeys[_i839].read(iprot);
+ xfer += this->foreignKeys[_i841].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4578,10 +4578,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter840;
- for (_iter840 = this->primaryKeys.begin(); _iter840 != this->primaryKeys.end(); ++_iter840)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter842;
+ for (_iter842 = this->primaryKeys.begin(); _iter842 != this->primaryKeys.end(); ++_iter842)
{
- xfer += (*_iter840).write(oprot);
+ xfer += (*_iter842).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter841;
- for (_iter841 = this->foreignKeys.begin(); _iter841 != this->foreignKeys.end(); ++_iter841)
+ std::vector<SQLForeignKey> ::const_iterator _iter843;
+ for (_iter843 = this->foreignKeys.begin(); _iter843 != this->foreignKeys.end(); ++_iter843)
{
- xfer += (*_iter841).write(oprot);
+ xfer += (*_iter843).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter842;
- for (_iter842 = (*(this->primaryKeys)).begin(); _iter842 != (*(this->primaryKeys)).end(); ++_iter842)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter844;
+ for (_iter844 = (*(this->primaryKeys)).begin(); _iter844 != (*(this->primaryKeys)).end(); ++_iter844)
{
- xfer += (*_iter842).write(oprot);
+ xfer += (*_iter844).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
- std::vector<SQLForeignKey> ::const_iterator _iter843;
- for (_iter843 = (*(this->foreignKeys)).begin(); _iter843 != (*(this->foreignKeys)).end(); ++_iter843)
+ std::vector<SQLForeignKey> ::const_iterator _iter845;
+ for (_iter845 = (*(this->foreignKeys)).begin(); _iter845 != (*(this->foreignKeys)).end(); ++_iter845)
{
- xfer += (*_iter843).write(oprot);
+ xfer += (*_iter845).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4816,6 +4816,213 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_presult::read(::apach
}
+ThriftHiveMetastore_drop_constraint_args::~ThriftHiveMetastore_drop_constraint_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_constraint_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->req.read(iprot);
+ this->__isset.req = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_drop_constraint_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_constraint_args");
+
+ xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->req.write(oprot);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_drop_constraint_pargs::~ThriftHiveMetastore_drop_constraint_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_constraint_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_constraint_pargs");
+
+ xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->req)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_drop_constraint_result::~ThriftHiveMetastore_drop_constraint_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_constraint_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ThriftHiveMetastore_drop_constraint_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_constraint_result");
+
+ if (this->__isset.o1) {
+ xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->o1.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.o3) {
+ xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->o3.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+
+ThriftHiveMetastore_drop_constraint_presult::~ThriftHiveMetastore_drop_constraint_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_constraint_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o1.read(iprot);
+ this->__isset.o1 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->o3.read(iprot);
+ this->__isset.o3 = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+
ThriftHiveMetastore_drop_table_args::~ThriftHiveMetastore_drop_table_args() throw() {
}
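
The one substantive change in this file is the block added above: the four generated shims (args / pargs / result / presult) for a new drop_constraint service method, whose result carries no success field and only two exception slots (o1, o3), implying a void RPC. A hypothetical client-side call might look like the sketch below. The request type name (DropConstraintRequest), its field names, the concrete exception types behind o1/o3, the Apache::Hadoop::Hive namespace, and the host/port are all assumptions following standard Thrift C++ codegen conventions; only the method name and its single struct argument are visible in this diff:

// Hypothetical usage sketch, not part of this patch: invoking the new
// drop_constraint RPC whose (de)serialization shims are added above.
#include <boost/shared_ptr.hpp>
#include <thrift/Thrift.h>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"  // generated header, name per Thrift convention

using namespace ::apache::thrift;
using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;

int main() {
  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  Apache::Hadoop::Hive::ThriftHiveMetastoreClient client(protocol);
  try {
    transport->open();
    Apache::Hadoop::Hive::DropConstraintRequest req;  // assumed type name
    req.__set_dbname("default");                      // assumed field names
    req.__set_tablename("orders");
    req.__set_constraintname("pk_orders");
    client.drop_constraint(req);  // void: the result shim carries only o1/o3
    transport->close();
  } catch (const TException& te) {  // o1/o3 arrive as TException-derived types
    // handle the failure (e.g. constraint or table not found)
  }
  return 0;
}
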
@@ -5434,14 +5641,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size844;
- ::apache::thrift::protocol::TType _etype847;
- xfer += iprot->readListBegin(_etype847, _size844);
- this->success.resize(_size844);
- uint32_t _i848;
- for (_i848 = 0; _i848 < _size844; ++_i848)
+ uint32_t _size846;
+ ::apache::thrift::protocol::TType _etype849;
+ xfer += iprot->readListBegin(_etype849, _size846);
+ this->success.resize(_size846);
+ uint32_t _i850;
+ for (_i850 = 0; _i850 < _size846; ++_i850)
{
- xfer += iprot->readString(this->success[_i848]);
+ xfer += iprot->readString(this->success[_i850]);
}
xfer += iprot->readListEnd();
}
@@ -5480,10 +5687,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter849;
- for (_iter849 = this->success.begin(); _iter849 != this->success.end(); ++_iter849)
+ std::vector<std::string> ::const_iterator _iter851;
+ for (_iter851 = this->success.begin(); _iter851 != this->success.end(); ++_iter851)
{
- xfer += oprot->writeString((*_iter849));
+ xfer += oprot->writeString((*_iter851));
}
xfer += oprot->writeListEnd();
}
@@ -5528,14 +5735,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size850;
- ::apache::thrift::protocol::TType _etype853;
- xfer += iprot->readListBegin(_etype853, _size850);
- (*(this->success)).resize(_size850);
- uint32_t _i854;
- for (_i854 = 0; _i854 < _size850; ++_i854)
+ uint32_t _size852;
+ ::apache::thrift::protocol::TType _etype855;
+ xfer += iprot->readListBegin(_etype855, _size852);
+ (*(this->success)).resize(_size852);
+ uint32_t _i856;
+ for (_i856 = 0; _i856 < _size852; ++_i856)
{
- xfer += iprot->readString((*(this->success))[_i854]);
+ xfer += iprot->readString((*(this->success))[_i856]);
}
xfer += iprot->readListEnd();
}
@@ -5610,14 +5817,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_types.clear();
- uint32_t _size855;
- ::apache::thrift::protocol::TType _etype858;
- xfer += iprot->readListBegin(_etype858, _size855);
- this->tbl_types.resize(_size855);
- uint32_t _i859;
- for (_i859 = 0; _i859 < _size855; ++_i859)
+ uint32_t _size857;
+ ::apache::thrift::protocol::TType _etype860;
+ xfer += iprot->readListBegin(_etype860, _size857);
+ this->tbl_types.resize(_size857);
+ uint32_t _i861;
+ for (_i861 = 0; _i861 < _size857; ++_i861)
{
- xfer += iprot->readString(this->tbl_types[_i859]);
+ xfer += iprot->readString(this->tbl_types[_i861]);
}
xfer += iprot->readListEnd();
}
@@ -5654,10 +5861,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
- std::vector<std::string> ::const_iterator _iter860;
- for (_iter860 = this->tbl_types.begin(); _iter860 != this->tbl_types.end(); ++_iter860)
+ std::vector<std::string> ::const_iterator _iter862;
+ for (_iter862 = this->tbl_types.begin(); _iter862 != this->tbl_types.end(); ++_iter862)
{
- xfer += oprot->writeString((*_iter860));
+ xfer += oprot->writeString((*_iter862));
}
xfer += oprot->writeListEnd();
}
@@ -5689,10 +5896,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
- std::vector<std::string> ::const_iterator _iter861;
- for (_iter861 = (*(this->tbl_types)).begin(); _iter861 != (*(this->tbl_types)).end(); ++_iter861)
+ std::vector<std::string> ::const_iterator _iter863;
+ for (_iter863 = (*(this->tbl_types)).begin(); _iter863 != (*(this->tbl_types)).end(); ++_iter863)
{
- xfer += oprot->writeString((*_iter861));
+ xfer += oprot->writeString((*_iter863));
}
xfer += oprot->writeListEnd();
}
@@ -5733,14 +5940,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size862;
- ::apache::thrift::protocol::TType _etype865;
- xfer += iprot->readListBegin(_etype865, _size862);
- this->success.resize(_size862);
- uint32_t _i866;
- for (_i866 = 0; _i866 < _size862; ++_i866)
+ uint32_t _size864;
+ ::apache::thrift::protocol::TType _etype867;
+ xfer += iprot->readListBegin(_etype867, _size864);
+ this->success.resize(_size864);
+ uint32_t _i868;
+ for (_i868 = 0; _i868 < _size864; ++_i868)
{
- xfer += this->success[_i866].read(iprot);
+ xfer += this->success[_i868].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5779,10 +5986,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<TableMeta> ::const_iterator _iter867;
- for (_iter867 = this->success.begin(); _iter867 != this->success.end(); ++_iter867)
+ std::vector<TableMeta> ::const_iterator _iter869;
+ for (_iter869 = this->success.begin(); _iter869 != this->success.end(); ++_iter869)
{
- xfer += (*_iter867).write(oprot);
+ xfer += (*_iter869).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5827,14 +6034,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size868;
- ::apache::thrift::protocol::TType _etype871;
- xfer += iprot->readListBegin(_etype871, _size868);
- (*(this->success)).resize(_size868);
- uint32_t _i872;
- for (_i872 = 0; _i872 < _size868; ++_i872)
+ uint32_t _size870;
+ ::apache::thrift::protocol::TType _etype873;
+ xfer += iprot->readListBegin(_etype873, _size870);
+ (*(this->success)).resize(_size870);
+ uint32_t _i874;
+ for (_i874 = 0; _i874 < _size870; ++_i874)
{
- xfer += (*(this->success))[_i872].read(iprot);
+ xfer += (*(this->success))[_i874].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5972,14 +6179,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size873;
- ::apache::thrift::protocol::TType _etype876;
- xfer += iprot->readListBegin(_etype876, _size873);
- this->success.resize(_size873);
- uint32_t _i877;
- for (_i877 = 0; _i877 < _size873; ++_i877)
+ uint32_t _size875;
+ ::apache::thrift::protocol::TType _etype878;
+ xfer += iprot->readListBegin(_etype878, _size875);
+ this->success.resize(_size875);
+ uint32_t _i879;
+ for (_i879 = 0; _i879 < _size875; ++_i879)
{
- xfer += iprot->readString(this->success[_i877]);
+ xfer += iprot->readString(this->success[_i879]);
}
xfer += iprot->readListEnd();
}
@@ -6018,10 +6225,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter878;
- for (_iter878 = this->success.begin(); _iter878 != this->success.end(); ++_iter878)
+ std::vector<std::string> ::const_iterator _iter880;
+ for (_iter880 = this->success.begin(); _iter880 != this->success.end(); ++_iter880)
{
- xfer += oprot->writeString((*_iter878));
+ xfer += oprot->writeString((*_iter880));
}
xfer += oprot->writeListEnd();
}
@@ -6066,14 +6273,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size879;
- ::apache::thrift::protocol::TType _etype882;
- xfer += iprot->readListBegin(_etype882, _size879);
- (*(this->success)).resize(_size879);
- uint32_t _i883;
- for (_i883 = 0; _i883 < _size879; ++_i883)
+ uint32_t _size881;
+ ::apache::thrift::protocol::TType _etype884;
+ xfer += iprot->readListBegin(_etype884, _size881);
+ (*(this->success)).resize(_size881);
+ uint32_t _i885;
+ for (_i885 = 0; _i885 < _size881; ++_i885)
{
- xfer += iprot->readString((*(this->success))[_i883]);
+ xfer += iprot->readString((*(this->success))[_i885]);
}
xfer += iprot->readListEnd();
}
@@ -6383,14 +6590,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size884;
- ::apache::thrift::protocol::TType _etype887;
- xfer += iprot->readListBegin(_etype887, _size884);
- this->tbl_names.resize(_size884);
- uint32_t _i888;
- for (_i888 = 0; _i888 < _size884; ++_i888)
+ uint32_t _size886;
+ ::apache::thrift::protocol::TType _etype889;
+ xfer += iprot->readListBegin(_etype889, _size886);
+ this->tbl_names.resize(_size886);
+ uint32_t _i890;
+ for (_i890 = 0; _i890 < _size886; ++_i890)
{
- xfer += iprot->readString(this->tbl_names[_i888]);
+ xfer += iprot->readString(this->tbl_names[_i890]);
}
xfer += iprot->readListEnd();
}
@@ -6423,10 +6630,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter889;
- for (_iter889 = this->tbl_names.begin(); _iter889 != this->tbl_names.end(); ++_iter889)
+ std::vector<std::string> ::const_iterator _iter891;
+ for (_iter891 = this->tbl_names.begin(); _iter891 != this->tbl_names.end(); ++_iter891)
{
- xfer += oprot->writeString((*_iter889));
+ xfer += oprot->writeString((*_iter891));
}
xfer += oprot->writeListEnd();
}
@@ -6454,10 +6661,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter890;
- for (_iter890 = (*(this->tbl_names)).begin(); _iter890 != (*(this->tbl_names)).end(); ++_iter890)
+ std::vector<std::string> ::const_iterator _iter892;
+ for (_iter892 = (*(this->tbl_names)).begin(); _iter892 != (*(this->tbl_names)).end(); ++_iter892)
{
- xfer += oprot->writeString((*_iter890));
+ xfer += oprot->writeString((*_iter892));
}
xfer += oprot->writeListEnd();
}
@@ -6498,14 +6705,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size891;
- ::apache::thrift::protocol::TType _etype894;
- xfer += iprot->readListBegin(_etype894, _size891);
- this->success.resize(_size891);
- uint32_t _i895;
- for (_i895 = 0; _i895 < _size891; ++_i895)
+ uint32_t _size893;
+ ::apache::thrift::protocol::TType _etype896;
+ xfer += iprot->readListBegin(_etype896, _size893);
+ this->success.resize(_size893);
+ uint32_t _i897;
+ for (_i897 = 0; _i897 < _size893; ++_i897)
{
- xfer += this->success[_i895].read(iprot);
+ xfer += this->success[_i897].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -6560,10 +6767,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Table> ::const_iterator _iter896;
- for (_iter896 = this->success.begin(); _iter896 != this->success.end(); ++_iter896)
+ std::vector<Table> ::const_iterator _iter898;
+ for (_iter898 = this->success.begin(); _iter898 != this->success.end(); ++_iter898)
{
- xfer += (*_iter896).write(oprot);
+ xfer += (*_iter898).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -6616,14 +6823,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size897;
- ::apache::thrift::protocol::TType _etype900;
- xfer += iprot->readListBegin(_etype900, _size897);
- (*(this->success)).resize(_size897);
- uint32_t _i901;
- for (_i901 = 0; _i901 < _size897; ++_i901)
+ uint32_t _size899;
+ ::apache::thrift::protocol::TType _etype902;
+ xfer += iprot->readListBegin(_etype902, _size899);
+ (*(this->success)).resize(_size899);
+ uint32_t _i903;
+ for (_i903 = 0; _i903 < _size899; ++_i903)
{
- xfer += (*(this->success))[_i901].read(iprot);
+ xfer += (*(this->success))[_i903].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -6809,14 +7016,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size902;
- ::apache::thrift::protocol::TType _etype905;
- xfer += iprot->readListBegin(_etype905, _size902);
- this->success.resize(_size902);
- uint32_t _i906;
- for (_i906 = 0; _i906 < _size902; ++_i906)
+ uint32_t _size904;
+ ::apache::thrift::protocol::TType _etype907;
+ xfer += iprot->readListBegin(_etype907, _size904);
+ this->success.resize(_size904);
+ uint32_t _i908;
+ for (_i908 = 0; _i908 < _size904; ++_i908)
{
- xfer += iprot->readString(this->success[_i906]);
+ xfer += iprot->readString(this->success[_i908]);
}
xfer += iprot->readListEnd();
}
@@ -6871,10 +7078,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter907;
- for (_iter907 = this->success.begin(); _iter907 != this->success.end(); ++_iter907)
+ std::vector<std::string> ::const_iterator _iter909;
+ for (_iter909 = this->success.begin(); _iter909 != this->success.end(); ++_iter909)
{
- xfer += oprot->writeString((*_iter907));
+ xfer += oprot->writeString((*_iter909));
}
xfer += oprot->writeListEnd();
}
@@ -6927,14 +7134,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size908;
- ::apache::thrift::protocol::TType _etype911;
- xfer += iprot->readListBegin(_etype911, _size908);
- (*(this->success)).resize(_size908);
- uint32_t _i912;
- for (_i912 = 0; _i912 < _size908; ++_i912)
+ uint32_t _size910;
+ ::apache::thrift::protocol::TType _etype913;
+ xfer += iprot->readListBegin(_etype913, _size910);
+ (*(this->success)).resize(_size910);
+ uint32_t _i914;
+ for (_i914 = 0; _i914 < _size910; ++_i914)
{
- xfer += iprot->readString((*(this->success))[_i912]);
+ xfer += iprot->readString((*(this->success))[_i914]);
}
xfer += iprot->readListEnd();
}
@@ -8268,14 +8475,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size913;
- ::apache::thrift::protocol::TType _etype916;
- xfer += iprot->readListBegin(_etype916, _size913);
- this->new_parts.resize(_size913);
- uint32_t _i917;
- for (_i917 = 0; _i917 < _size913; ++_i917)
+ uint32_t _size915;
+ ::apache::thrift::protocol::TType _etype918;
+ xfer += iprot->readListBegin(_etype918, _size915);
+ this->new_parts.resize(_size915);
+ uint32_t _i919;
+ for (_i919 = 0; _i919 < _size915; ++_i919)
{
- xfer += this->new_parts[_i917].read(iprot);
+ xfer += this->new_parts[_i919].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8304,10 +8511,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<Partition> ::const_iterator _iter918;
- for (_iter918 = this->new_parts.begin(); _iter918 != this->new_parts.end(); ++_iter918)
+ std::vector<Partition> ::const_iterator _iter920;
+ for (_iter920 = this->new_parts.begin(); _iter920 != this->new_parts.end(); ++_iter920)
{
- xfer += (*_iter918).write(oprot);
+ xfer += (*_iter920).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8331,10 +8538,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<Partition> ::const_iterator _iter919;
- for (_iter919 = (*(this->new_parts)).begin(); _iter919 != (*(this->new_parts)).end(); ++_iter919)
+ std::vector<Partition> ::const_iterator _iter921;
+ for (_iter921 = (*(this->new_parts)).begin(); _iter921 != (*(this->new_parts)).end(); ++_iter921)
{
- xfer += (*_iter919).write(oprot);
+ xfer += (*_iter921).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8543,14 +8750,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size920;
- ::apache::thrift::protocol::TType _etype923;
- xfer += iprot->readListBegin(_etype923, _size920);
- this->new_parts.resize(_size920);
- uint32_t _i924;
- for (_i924 = 0; _i924 < _size920; ++_i924)
+ uint32_t _size922;
+ ::apache::thrift::protocol::TType _etype925;
+ xfer += iprot->readListBegin(_etype925, _size922);
+ this->new_parts.resize(_size922);
+ uint32_t _i926;
+ for (_i926 = 0; _i926 < _size922; ++_i926)
{
- xfer += this->new_parts[_i924].read(iprot);
+ xfer += this->new_parts[_i926].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -8579,10 +8786,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<PartitionSpec> ::const_iterator _iter925;
- for (_iter925 = this->new_parts.begin(); _iter925 != this->new_parts.end(); ++_iter925)
+ std::vector<PartitionSpec> ::const_iterator _iter927;
+ for (_iter927 = this->new_parts.begin(); _iter927 != this->new_parts.end(); ++_iter927)
{
- xfer += (*_iter925).write(oprot);
+ xfer += (*_iter927).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8606,10 +8813,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<PartitionSpec> ::const_iterator _iter926;
- for (_iter926 = (*(this->new_parts)).begin(); _iter926 != (*(this->new_parts)).end(); ++_iter926)
+ std::vector<PartitionSpec> ::const_iterator _iter928;
+ for (_iter928 = (*(this->new_parts)).begin(); _iter928 != (*(this->new_parts)).end(); ++_iter928)
{
- xfer += (*_iter926).write(oprot);
+ xfer += (*_iter928).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8834,14 +9041,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size927;
- ::apache::thrift::protocol::TType _etype930;
- xfer += iprot->readListBegin(_etype930, _size927);
- this->part_vals.resize(_size927);
- uint32_t _i931;
- for (_i931 = 0; _i931 < _size927; ++_i931)
+ uint32_t _size929;
+ ::apache::thrift::protocol::TType _etype932;
+ xfer += iprot->readListBegin(_etype932, _size929);
+ this->part_vals.resize(_size929);
+ uint32_t _i933;
+ for (_i933 = 0; _i933 < _size929; ++_i933)
{
- xfer += iprot->readString(this->part_vals[_i931]);
+ xfer += iprot->readString(this->part_vals[_i933]);
}
xfer += iprot->readListEnd();
}
@@ -8878,10 +9085,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter932;
- for (_iter932 = this->part_vals.begin(); _iter932 != this->part_vals.end(); ++_iter932)
+ std::vector<std::string> ::const_iterator _iter934;
+ for (_iter934 = this->part_vals.begin(); _iter934 != this->part_vals.end(); ++_iter934)
{
- xfer += oprot->writeString((*_iter932));
+ xfer += oprot->writeString((*_iter934));
}
xfer += oprot->writeListEnd();
}
@@ -8913,10 +9120,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter933;
- for (_iter933 = (*(this->part_vals)).begin(); _iter933 != (*(this->part_vals)).end(); ++_iter933)
+ std::vector<std::string> ::const_iterator _iter935;
+ for (_iter935 = (*(this->part_vals)).begin(); _iter935 != (*(this->part_vals)).end(); ++_iter935)
{
- xfer += oprot->writeString((*_iter933));
+ xfer += oprot->writeString((*_iter935));
}
xfer += oprot->writeListEnd();
}
@@ -9388,14 +9595,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size934;
- ::apache::thrift::protocol::TType _etype937;
- xfer += iprot->readListBegin(_etype937, _size934);
- this->part_vals.resize(_size934);
- uint32_t _i938;
- for (_i938 = 0; _i938 < _size934; ++_i938)
+ uint32_t _size936;
+ ::apache::thrift::protocol::TType _etype939;
+ xfer += iprot->readListBegin(_etype939, _size936);
+ this->part_vals.resize(_size936);
+ uint32_t _i940;
+ for (_i940 = 0; _i940 < _size936; ++_i940)
{
- xfer += iprot->readString(this->part_vals[_i938]);
+ xfer += iprot->readString(this->part_vals[_i940]);
}
xfer += iprot->readListEnd();
}
@@ -9440,10 +9647,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter939;
- for (_iter939 = this->part_vals.begin(); _iter939 != this->part_vals.end(); ++_iter939)
+ std::vector<std::string> ::const_iterator _iter941;
+ for (_iter941 = this->part_vals.begin(); _iter941 != this->part_vals.end(); ++_iter941)
{
- xfer += oprot->writeString((*_iter939));
+ xfer += oprot->writeString((*_iter941));
}
xfer += oprot->writeListEnd();
}
@@ -9479,10 +9686,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter940;
- for (_iter940 = (*(this->part_vals)).begin(); _iter940 != (*(this->part_vals)).end(); ++_iter940)
+ std::vector<std::string> ::const_iterator _iter942;
+ for (_iter942 = (*(this->part_vals)).begin(); _iter942 != (*(this->part_vals)).end(); ++_iter942)
{
- xfer += oprot->writeString((*_iter940));
+ xfer += oprot->writeString((*_iter942));
}
xfer += oprot->writeListEnd();
}
@@ -10285,14 +10492,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size941;
- ::apache::thrift::protocol::TType _etype944;
- xfer += iprot->readListBegin(_etype944, _size941);
- this->part_vals.resize(_size941);
- uint32_t _i945;
- for (_i945 = 0; _i945 < _size941; ++_i945)
+ uint32_t _size943;
+ ::apache::thrift::protocol::TType _etype946;
+ xfer += iprot->readListBegin(_etype946, _size943);
+ this->part_vals.resize(_size943);
+ uint32_t _i947;
+ for (_i947 = 0; _i947 < _size943; ++_i947)
{
- xfer += iprot->readString(this->part_vals[_i945]);
+ xfer += iprot->readString(this->part_vals[_i947]);
}
xfer += iprot->readListEnd();
}
@@ -10337,10 +10544,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter946;
- for (_iter946 = this->part_vals.begin(); _iter946 != this->part_vals.end(); ++_iter946)
+ std::vector<std::string> ::const_iterator _iter948;
+ for (_iter948 = this->part_vals.begin(); _iter948 != this->part_vals.end(); ++_iter948)
{
- xfer += oprot->writeString((*_iter946));
+ xfer += oprot->writeString((*_iter948));
}
xfer += oprot->writeListEnd();
}
@@ -10376,10 +10583,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter947;
- for (_iter947 = (*(this->part_vals)).begin(); _iter947 != (*(this->part_vals)).end(); ++_iter947)
+ std::vector<std::string> ::const_iterator _iter949;
+ for (_iter949 = (*(this->part_vals)).begin(); _iter949 != (*(this->part_vals)).end(); ++_iter949)
{
- xfer += oprot->writeString((*_iter947));
+ xfer += oprot->writeString((*_iter949));
}
xfer += oprot->writeListEnd();
}
@@ -10588,14 +10795,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size948;
- ::apache::thrift::protocol::TType _etype951;
- xfer += iprot->readListBegin(_etype951, _size948);
- this->part_vals.resize(_size948);
- uint32_t _i952;
- for (_i952 = 0; _i952 < _size948; ++_i952)
+ uint32_t _size950;
+ ::apache::thrift::protocol::TType _etype953;
+ xfer += iprot->readListBegin(_etype953, _size950);
+ this->part_vals.resize(_size950);
+ uint32_t _i954;
+ for (_i954 = 0; _i954 < _size950; ++_i954)
{
- xfer += iprot->readString(this->part_vals[_i952]);
+ xfer += iprot->readString(this->part_vals[_i954]);
}
xfer += iprot->readListEnd();
}
@@ -10648,10 +10855,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter953;
- for (_iter953 = this->part_vals.begin(); _iter953 != this->part_vals.end(); ++_iter953)
+ std::vector<std::string> ::const_iterator _iter955;
+ for (_iter955 = this->part_vals.begin(); _iter955 != this->part_vals.end(); ++_iter955)
{
- xfer += oprot->writeString((*_iter953));
+ xfer += oprot->writeString((*_iter955));
}
xfer += oprot->writeListEnd();
}
@@ -10691,10 +10898,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter954;
- for (_iter954 = (*(this->part_vals)).begin(); _iter954 != (*(this->part_vals)).end(); ++_iter954)
+ std::vector<std::string> ::const_iterator _iter956;
+ for (_iter956 = (*(this->part_vals)).begin(); _iter956 != (*(this->part_vals)).end(); ++_iter956)
{
- xfer += oprot->writeString((*_iter954));
+ xfer += oprot->writeString((*_iter956));
}
xfer += oprot->writeListEnd();
}
@@ -11700,14 +11907,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size955;
- ::apache::thrift::protocol::TType _etype958;
- xfer += iprot->readListBegin(_etype958, _size955);
- this->part_vals.resize(_size955);
- uint32_t _i959;
- for (_i959 = 0; _i959 < _size955; ++_i959)
+ uint32_t _size957;
+ ::apache::thrift::protocol::TType _etype960;
+ xfer += iprot->readListBegin(_etype960, _size957);
+ this->part_vals.resize(_size957);
+ uint32_t _i961;
+ for (_i961 = 0; _i961 < _size957; ++_i961)
{
- xfer += iprot->readString(this->part_vals[_i959]);
+ xfer += iprot->readString(this->part_vals[_i961]);
}
xfer += iprot->readListEnd();
}
@@ -11744,10 +11951,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter960;
- for (_iter960 = this->part_vals.begin(); _iter960 != this->part_vals.end(); ++_iter960)
+ std::vector<std::string> ::const_iterator _iter962;
+ for (_iter962 = this->part_vals.begin(); _iter962 != this->part_vals.end(); ++_iter962)
{
- xfer += oprot->writeString((*_iter960));
+ xfer += oprot->writeString((*_iter962));
}
xfer += oprot->writeListEnd();
}
@@ -11779,10 +11986,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter961;
- for (_iter961 = (*(this->part_vals)).begin(); _iter961 != (*(this->part_vals)).end(); ++_iter961)
+ std::vector<std::string> ::const_iterator _iter963;
+ for (_iter963 = (*(this->part_vals)).begin(); _iter963 != (*(this->part_vals)).end(); ++_iter963)
{
- xfer += oprot->writeString((*_iter961));
+ xfer += oprot->writeString((*_iter963));
}
xfer += oprot->writeListEnd();
}
@@ -11971,17 +12178,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->partitionSpecs.clear();
- uint32_t _size962;
- ::apache::thrift::protocol::TType _ktype963;
- ::apache::thrift::protocol::TType _vtype964;
- xfer += iprot->readMapBegin(_ktype963, _vtype964, _size962);
- uint32_t _i966;
- for (_i966 = 0; _i966 < _size962; ++_i966)
+ uint32_t _size964;
+ ::apache::thrift::protocol::TType _ktype965;
+ ::apache::thrift::protocol::TType _vtype966;
+ xfer += iprot->readMapBegin(_ktype965, _vtype966, _size964);
+ uint32_t _i968;
+ for (_i968 = 0; _i968 < _size964; ++_i968)
{
- std::string _key967;
- xfer += iprot->readString(_key967);
- std::string& _val968 = this->partitionSpecs[_key967];
- xfer += iprot->readString(_val968);
+ std::string _key969;
+ xfer += iprot->readString(_key969);
+ std::string& _val970 = this->partitionSpecs[_key969];
+ xfer += iprot->readString(_val970);
}
xfer += iprot->readMapEnd();
}
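
The map-valued fields in the exchange_partition and exchange_partitions hunks get the same mechanical renumbering; stripped of suffixes, their read loop reduces to the sketch below (suffix-free names are illustrative, assuming only the standard Thrift TProtocol interface):

#include <map>
#include <string>
#include <thrift/protocol/TProtocol.h>

/* Suffix-free sketch of the generated string-map read loop seen in the
 * exchange_partition(s) hunks; illustrative names, identical logic. */
uint32_t readStringMap(::apache::thrift::protocol::TProtocol* iprot,
                       std::map<std::string, std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  ::apache::thrift::protocol::TType ktype;  // key type tag
  ::apache::thrift::protocol::TType vtype;  // value type tag
  out.clear();
  xfer += iprot->readMapBegin(ktype, vtype, size);
  for (uint32_t i = 0; i < size; ++i) {
    std::string key;
    xfer += iprot->readString(key);
    std::string& val = out[key];            // default-insert, then fill in place
    xfer += iprot->readString(val);
  }
  xfer += iprot->readMapEnd();
  return xfer;
}
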
@@ -12042,11 +12249,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
- std::map<std::string, std::string> ::const_iterator _iter969;
- for (_iter969 = this->partitionSpecs.begin(); _iter969 != this->partitionSpecs.end(); ++_iter969)
+ std::map<std::string, std::string> ::const_iterator _iter971;
+ for (_iter971 = this->partitionSpecs.begin(); _iter971 != this->partitionSpecs.end(); ++_iter971)
{
- xfer += oprot->writeString(_iter969->first);
- xfer += oprot->writeString(_iter969->second);
+ xfer += oprot->writeString(_iter971->first);
+ xfer += oprot->writeString(_iter971->second);
}
xfer += oprot->writeMapEnd();
}
@@ -12086,11 +12293,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
- std::map<std::string, std::string> ::const_iterator _iter970;
- for (_iter970 = (*(this->partitionSpecs)).begin(); _iter970 != (*(this->partitionSpecs)).end(); ++_iter970)
+ std::map<std::string, std::string> ::const_iterator _iter972;
+ for (_iter972 = (*(this->partitionSpecs)).begin(); _iter972 != (*(this->partitionSpecs)).end(); ++_iter972)
{
- xfer += oprot->writeString(_iter970->first);
- xfer += oprot->writeString(_iter970->second);
+ xfer += oprot->writeString(_iter972->first);
+ xfer += oprot->writeString(_iter972->second);
}
xfer += oprot->writeMapEnd();
}
@@ -12335,17 +12542,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->partitionSpecs.clear();
- uint32_t _size971;
- ::apache::thrift::protocol::TType _ktype972;
- ::apache::thrift::protocol::TType _vtype973;
- xfer += iprot->readMapBegin(_ktype972, _vtype973, _size971);
- uint32_t _i975;
- for (_i975 = 0; _i975 < _size971; ++_i975)
+ uint32_t _size973;
+ ::apache::thrift::protocol::TType _ktype974;
+ ::apache::thrift::protocol::TType _vtype975;
+ xfer += iprot->readMapBegin(_ktype974, _vtype975, _size973);
+ uint32_t _i977;
+ for (_i977 = 0; _i977 < _size973; ++_i977)
{
- std::string _key976;
- xfer += iprot->readString(_key976);
- std::string& _val977 = this->partitionSpecs[_key976];
- xfer += iprot->readString(_val977);
+ std::string _key978;
+ xfer += iprot->readString(_key978);
+ std::string& _val979 = this->partitionSpecs[_key978];
+ xfer += iprot->readString(_val979);
}
xfer += iprot->readMapEnd();
}
@@ -12406,11 +12613,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
- std::map<std::string, std::string> ::const_iterator _iter978;
- for (_iter978 = this->partitionSpecs.begin(); _iter978 != this->partitionSpecs.end(); ++_iter978)
+ std::map<std::string, std::string> ::const_iterator _iter980;
+ for (_iter980 = this->partitionSpecs.begin(); _iter980 != this->partitionSpecs.end(); ++_iter980)
{
- xfer += oprot->writeString(_iter978->first);
- xfer += oprot->writeString(_iter978->second);
+ xfer += oprot->writeString(_iter980->first);
+ xfer += oprot->writeString(_iter980->second);
}
xfer += oprot->writeMapEnd();
}
@@ -12450,11 +12657,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
- std::map<std::string, std::string> ::const_iterator _iter979;
- for (_iter979 = (*(this->partitionSpecs)).begin(); _iter979 != (*(this->partitionSpecs)).end(); ++_iter979)
+ std::map<std::string, std::string> ::const_iterator _iter981;
+ for (_iter981 = (*(this->partitionSpecs)).begin(); _iter981 != (*(this->partitionSpecs)).end(); ++_iter981)
{
- xfer += oprot->writeString(_iter979->first);
- xfer += oprot->writeString(_iter979->second);
+ xfer += oprot->writeString(_iter981->first);
+ xfer += oprot->writeString(_iter981->second);
}
xfer += oprot->writeMapEnd();
}
@@ -12511,14 +12718,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size980;
- ::apache::thrift::protocol::TType _etype983;
- xfer += iprot->readListBegin(_etype983, _size980);
- this->success.resize(_size980);
- uint32_t _i984;
- for (_i984 = 0; _i984 < _size980; ++_i984)
+ uint32_t _size982;
+ ::apache::thrift::protocol::TType _etype985;
+ xfer += iprot->readListBegin(_etype985, _size982);
+ this->success.resize(_size982);
+ uint32_t _i986;
+ for (_i986 = 0; _i986 < _size982; ++_i986)
{
- xfer += this->success[_i984].read(iprot);
+ xfer += this->success[_i986].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -12581,10 +12788,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Partition> ::const_iterator _iter985;
- for (_iter985 = this->success.begin(); _iter985 != this->success.end(); ++_iter985)
+ std::vector<Partition> ::const_iterator _iter987;
+ for (_iter987 = this->success.begin(); _iter987 != this->success.end(); ++_iter987)
{
- xfer += (*_iter985).write(oprot);
+ xfer += (*_iter987).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -12641,14 +12848,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size986;
- ::apache::thrift::protocol::TType _etype989;
- xfer += iprot->readListBegin(_etype989, _size986);
- (*(this->success)).resize(_size986);
- uint32_t _i990;
- for (_i990 = 0; _i990 < _size986; ++_i990)
+ uint32_t _size988;
+ ::apache::thrift::protocol::TType _etype991;
+ xfer += iprot->readListBegin(_etype991, _size988);
+ (*(this->success)).resize(_size988);
+ uint32_t _i992;
+ for (_i992 = 0; _i992 < _size988; ++_i992)
{
- xfer += (*(this->success))[_i990].read(iprot);
+ xfer += (*(this->success))[_i992].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -12747,14 +12954,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size991;
- ::apache::thrift::protocol::TType _etype994;
- xfer += iprot->readListBegin(_etype994, _size991);
- this->part_vals.resize(_size991);
- uint32_t _i995;
- for (_i995 = 0; _i995 < _size991; ++_i995)
+ uint32_t _size993;
+ ::apache::thrift::protocol::TType _etype996;
+ xfer += iprot->readListBegin(_etype996, _size993);
+ this->part_vals.resize(_size993);
+ uint32_t _i997;
+ for (_i997 = 0; _i997 < _size993; ++_i997)
{
- xfer += iprot->readString(this->part_vals[_i995]);
+ xfer += iprot->readString(this->part_vals[_i997]);
}
xfer += iprot->readListEnd();
}
@@ -12775,14 +12982,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->group_names.clear();
- uint32_t _size996;
- ::apache::thrift::protocol::TType _etype999;
- xfer += iprot->readListBegin(_etype999, _size996);
- this->group_names.resize(_size996);
- uint32_t _i1000;
- for (_i1000 = 0; _i1000 < _size996; ++_i1000)
+ uint32_t _size998;
+ ::apache::thrift::protocol::TType _etype1001;
+ xfer += iprot->readListBegin(_etype1001, _size998);
+ this->group_names.resize(_size998);
+ uint32_t _i1002;
+ for (_i1002 = 0; _i1002 < _size998; ++_i1002)
{
- xfer += iprot->readString(this->group_names[_i1000]);
+ xfer += iprot->readString(this->group_names[_i1002]);
}
xfer += iprot->readListEnd();
}
@@ -12819,10 +13026,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1001;
- for (_iter1001 = this->part_vals.begin(); _iter1001 != this->part_vals.end(); ++_iter1001)
+ std::vector<std::string> ::const_iterator _iter1003;
+ for (_iter1003 = this->part_vals.begin(); _iter1003 != this->part_vals.end(); ++_iter1003)
{
- xfer += oprot->writeString((*_iter1001));
+ xfer += oprot->writeString((*_iter1003));
}
xfer += oprot->writeListEnd();
}
@@ -12835,10 +13042,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
- std::vector<std::string> ::const_iterator _iter1002;
- for (_iter1002 = this->group_names.begin(); _iter1002 != this->group_names.end(); ++_iter1002)
+ std::vector<std::string> ::const_iterator _iter1004;
+ for (_iter1004 = this->group_names.begin(); _iter1004 != this->group_names.end(); ++_iter1004)
{
- xfer += oprot->writeString((*_iter1002));
+ xfer += oprot->writeString((*_iter1004));
}
xfer += oprot->writeListEnd();
}
@@ -12870,10 +13077,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1003;
- for (_iter1003 = (*(this->part_vals)).begin(); _iter1003 != (*(this->part_vals)).end(); ++_iter1003)
+ std::vector<std::string> ::const_iterator _iter1005;
+ for (_iter1005 = (*(this->part_vals)).begin(); _iter1005 != (*(this->part_vals)).end(); ++_iter1005)
{
- xfer += oprot->writeString((*_iter1003));
+ xfer += oprot->writeString((*_iter1005));
}
xfer += oprot->writeListEnd();
}
@@ -12886,10 +13093,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
- std::vector<std::string> ::const_iterator _iter1004;
- for (_iter1004 = (*(this->group_names)).begin(); _iter1004 != (*(this->group_names)).end(); ++_iter1004)
+ std::vector<std::string> ::const_iterator _iter1006;
+ for (_iter1006 = (*(this->group_names)).begin(); _iter1006 != (*(this->group_names)).end(); ++_iter1006)
{
- xfer += oprot->writeString((*_iter1004));
+ xfer += oprot->writeString((*_iter1006));
}
xfer += oprot->writeListEnd();
}
@@ -13448,14 +13655,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1005;
- ::apache::thrift::protocol::TType _etype1008;
- xfer += iprot->readListBegin(_etype1008, _size1005);
- this->success.resize(_size1005);
- uint32_t _i1009;
- for (_i1009 = 0; _i1009 < _size1005; ++_i1009)
+ uint32_t _size1007;
+ ::apache::thrift::protocol::TType _etype1010;
+ xfer += iprot->readListBegin(_etype1010, _size1007);
+ this->success.resize(_size1007);
+ uint32_t _i1011;
+ for (_i1011 = 0; _i1011 < _size1007; ++_i1011)
{
- xfer += this->success[_i1009].read(iprot);
+ xfer += this->success[_i1011].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -13502,10 +13709,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Partition> ::const_iterator _iter1010;
- for (_iter1010 = this->success.begin(); _iter1010 != this->success.end(); ++_iter1010)
+ std::vector<Partition> ::const_iterator _iter1012;
+ for (_iter1012 = this->success.begin(); _iter1012 != this->success.end(); ++_iter1012)
{
- xfer += (*_iter1010).write(oprot);
+ xfer += (*_iter1012).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13554,14 +13761,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1011;
- ::apache::thrift::protocol::TType _etype1014;
- xfer += iprot->readListBegin(_etype1014, _size1011);
- (*(this->success)).resize(_size1011);
- uint32_t _i1015;
- for (_i1015 = 0; _i1015 < _size1011; ++_i1015)
+ uint32_t _size1013;
+ ::apache::thrift::protocol::TType _etype1016;
+ xfer += iprot->readListBegin(_etype1016, _size1013);
+ (*(this->success)).resize(_size1013);
+ uint32_t _i1017;
+ for (_i1017 = 0; _i1017 < _size1013; ++_i1017)
{
- xfer += (*(this->success))[_i1015].read(iprot);
+ xfer += (*(this->success))[_i1017].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -13660,14 +13867,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->group_names.clear();
- uint32_t _size1016;
- ::apache::thrift::protocol::TType _etype1019;
- xfer += iprot->readListBegin(_etype1019, _size1016);
- this->group_names.resize(_size1016);
- uint32_t _i1020;
- for (_i1020 = 0; _i1020 < _size1016; ++_i1020)
+ uint32_t _size1018;
+ ::apache::thrift::protocol::TType _etype1021;
+ xfer += iprot->readListBegin(_etype1021, _size1018);
+ this->group_names.resize(_size1018);
+ uint32_t _i1022;
+ for (_i1022 = 0; _i1022 < _size1018; ++_i1022)
{
- xfer += iprot->readString(this->group_names[_i1020]);
+ xfer += iprot->readString(this->group_names[_i1022]);
}
xfer += iprot->readListEnd();
}
@@ -13712,10 +13919,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
- std::vector<std::string> ::const_iterator _iter1021;
- for (_iter1021 = this->group_names.begin(); _iter1021 != this->group_names.end(); ++_iter1021)
+ std::vector<std::string> ::const_iterator _iter1023;
+ for (_iter1023 = this->group_names.begin(); _iter1023 != this->group_names.end(); ++_iter1023)
{
- xfer += oprot->writeString((*_iter1021));
+ xfer += oprot->writeString((*_iter1023));
}
xfer += oprot->writeListEnd();
}
@@ -13755,10 +13962,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
- std::vector<std::string> ::const_iterator _iter1022;
- for (_iter1022 = (*(this->group_names)).begin(); _iter1022 != (*(this->group_names)).end(); ++_iter1022)
+ std::vector<std::string> ::const_iterator _iter1024;
+ for (_iter1024 = (*(this->group_names)).begin(); _iter1024 != (*(this->group_names)).end(); ++_iter1024)
{
- xfer += oprot->writeString((*_iter1022));
+ xfer += oprot->writeString((*_iter1024));
}
xfer += oprot->writeListEnd();
}
@@ -13799,14 +14006,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1023;
- ::apache::thrift::protocol::TType _etype1026;
- xfer += iprot->readListBegin(_etype1026, _size1023);
- this->success.resize(_size1023);
- uint32_t _i1027;
- for (_i1027 = 0; _i1027 < _size1023; ++_i1027)
+ uint32_t _size1025;
+ ::apache::thrift::protocol::TType _etype1028;
+ xfer += iprot->readListBegin(_etype1028, _size1025);
+ this->success.resize(_size1025);
+ uint32_t _i1029;
+ for (_i1029 = 0; _i1029 < _size1025; ++_i1029)
{
- xfer += this->success[_i1027].read(iprot);
+
<TRUNCATED>
[11/20] hive git commit: HIVE-13660: Vectorizing IN expression with
list of columns throws java.lang.ClassCastException ExprNodeColumnDesc cannot
be cast to ExprNodeConstantDesc (Matt McCline,
reviewed by Prasanth Jayachandran)
Posted by jd...@apache.org.
HIVE-13660: Vectorizing IN expression with list of columns throws java.lang.ClassCastException ExprNodeColumnDesc cannot be cast to ExprNodeConstantDesc (Matt McCline, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e68783c8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e68783c8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e68783c8
Branch: refs/heads/llap
Commit: e68783c8e5cdb0cc00db6d725f15392bd5a6fe06
Parents: 652f88a
Author: Matt McCline <mm...@hortonworks.com>
Authored: Wed May 4 14:59:00 2016 -0700
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Wed May 4 14:59:30 2016 -0700
----------------------------------------------------------------------
.../ql/exec/vector/VectorizationContext.java | 7 ++++
.../vector_non_constant_in_expr.q | 4 +++
.../vector_non_constant_in_expr.q.out | 36 ++++++++++++++++++++
3 files changed, 47 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/e68783c8/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 5454ba3..9558d31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -1519,6 +1519,13 @@ public class VectorizationContext {
VectorExpression expr = null;
+ // Validate the IN items are only constants.
+ for (ExprNodeDesc inListChild : childrenForInList) {
+ if (!(inListChild instanceof ExprNodeConstantDesc)) {
+ throw new HiveException("Vectorizing IN expression only supported for constant values");
+ }
+ }
+
// determine class
Class<?> cl = null;
if (isIntFamily(colType)) {
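A minimal standalone sketch of the guard added above, using stand-in node types rather than Hive's ExprNodeDesc hierarchy: every IN-list child is checked up front, so a column reference yields a clear planner error (and a row-mode fallback upstream) instead of a ClassCastException deep inside expression generation.

    import java.util.Arrays;
    import java.util.List;

    // Stand-ins for Hive's ExprNodeDesc / ExprNodeConstantDesc / ExprNodeColumnDesc.
    class InListGuardSketch {
      interface ExprNode {}
      static final class ConstantNode implements ExprNode {}
      static final class ColumnNode implements ExprNode {}

      // Mirrors the added loop: reject anything that is not a constant.
      static void validateInListChildren(List<ExprNode> children) {
        for (ExprNode child : children) {
          if (!(child instanceof ConstantNode)) {
            throw new IllegalArgumentException(
                "Vectorizing IN expression only supported for constant values");
          }
        }
      }

      public static void main(String[] args) {
        validateInListChildren(Arrays.asList(new ConstantNode()));   // constants: fine
        try {
          validateInListChildren(Arrays.asList(new ColumnNode()));   // column: rejected
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
        }
      }
    }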
http://git-wip-us.apache.org/repos/asf/hive/blob/e68783c8/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q b/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
new file mode 100644
index 0000000..69142bf
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_non_constant_in_expr.q
@@ -0,0 +1,4 @@
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+
+explain SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint);
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/e68783c8/ql/src/test/results/clientpositive/vector_non_constant_in_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_non_constant_in_expr.q.out b/ql/src/test/results/clientpositive/vector_non_constant_in_expr.q.out
new file mode 100644
index 0000000..8845cb2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_non_constant_in_expr.q.out
@@ -0,0 +1,36 @@
+PREHOOK: query: explain SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain SELECT * FROM alltypesorc WHERE cint in (ctinyint, cbigint)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: alltypesorc
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint) IN (ctinyint, cbigint) (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
[15/20] hive git commit: HIVE-13632: Hive failing on insert empty
array into parquet table. (Yongzhi Chen, reviewed by Sergio Pena)
Posted by jd...@apache.org.
HIVE-13632: Hive failing on insert empty array into parquet table. (Yongzhi Chen, reviewed by Sergio Pena)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/96f2dc72
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/96f2dc72
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/96f2dc72
Branch: refs/heads/llap
Commit: 96f2dc723270bb4c38e5ab842371929c2c1c849a
Parents: cbebb4d
Author: Yongzhi Chen <yc...@apache.org>
Authored: Thu Apr 28 14:52:16 2016 -0400
Committer: Yongzhi Chen <yc...@apache.org>
Committed: Thu May 5 09:58:39 2016 -0400
----------------------------------------------------------------------
.../serde/AbstractParquetMapInspector.java | 4 +-
.../serde/ParquetHiveArrayInspector.java | 4 +-
.../ql/io/parquet/write/DataWritableWriter.java | 67 ++++++++-------
.../ql/io/parquet/TestDataWritableWriter.java | 29 +++++++
.../serde/TestAbstractParquetMapInspector.java | 4 +-
.../serde/TestParquetHiveArrayInspector.java | 4 +-
.../parquet_array_map_emptynullvals.q | 20 +++++
.../parquet_array_map_emptynullvals.q.out | 87 ++++++++++++++++++++
8 files changed, 180 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/AbstractParquetMapInspector.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/AbstractParquetMapInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/AbstractParquetMapInspector.java
index 49bf1c5..e80206e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/AbstractParquetMapInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/AbstractParquetMapInspector.java
@@ -60,7 +60,7 @@ public abstract class AbstractParquetMapInspector implements SettableMapObjectIn
if (data instanceof ArrayWritable) {
final Writable[] mapArray = ((ArrayWritable) data).get();
- if (mapArray == null || mapArray.length == 0) {
+ if (mapArray == null) {
return null;
}
@@ -90,7 +90,7 @@ public abstract class AbstractParquetMapInspector implements SettableMapObjectIn
if (data instanceof ArrayWritable) {
final Writable[] mapArray = ((ArrayWritable) data).get();
- if (mapArray == null || mapArray.length == 0) {
+ if (mapArray == null) {
return -1;
} else {
return mapArray.length;
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
index 05e92b5..55614a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
@@ -83,7 +83,7 @@ public class ParquetHiveArrayInspector implements SettableListObjectInspector {
if (data instanceof ArrayWritable) {
final Writable[] array = ((ArrayWritable) data).get();
- if (array == null || array.length == 0) {
+ if (array == null) {
return -1;
}
@@ -105,7 +105,7 @@ public class ParquetHiveArrayInspector implements SettableListObjectInspector {
if (data instanceof ArrayWritable) {
final Writable[] array = ((ArrayWritable) data).get();
- if (array == null || array.length == 0) {
+ if (array == null) {
return null;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
index 69272dc..1e26c19 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/DataWritableWriter.java
@@ -259,21 +259,24 @@ public class DataWritableWriter {
@Override
public void write(Object value) {
recordConsumer.startGroup();
- recordConsumer.startField(repeatedGroupName, 0);
-
int listLength = inspector.getListLength(value);
- for (int i = 0; i < listLength; i++) {
- Object element = inspector.getListElement(value, i);
- recordConsumer.startGroup();
- if (element != null) {
- recordConsumer.startField(elementName, 0);
- elementWriter.write(element);
- recordConsumer.endField(elementName, 0);
+
+ if (listLength > 0) {
+ recordConsumer.startField(repeatedGroupName, 0);
+
+ for (int i = 0; i < listLength; i++) {
+ Object element = inspector.getListElement(value, i);
+ recordConsumer.startGroup();
+ if (element != null) {
+ recordConsumer.startField(elementName, 0);
+ elementWriter.write(element);
+ recordConsumer.endField(elementName, 0);
+ }
+ recordConsumer.endGroup();
}
- recordConsumer.endGroup();
- }
- recordConsumer.endField(repeatedGroupName, 0);
+ recordConsumer.endField(repeatedGroupName, 0);
+ }
recordConsumer.endGroup();
}
}
@@ -307,30 +310,32 @@ public class DataWritableWriter {
@Override
public void write(Object value) {
recordConsumer.startGroup();
- recordConsumer.startField(repeatedGroupName, 0);
Map<?, ?> mapValues = inspector.getMap(value);
- for (Map.Entry<?, ?> keyValue : mapValues.entrySet()) {
- recordConsumer.startGroup();
- if (keyValue != null) {
- // write key element
- Object keyElement = keyValue.getKey();
- recordConsumer.startField(keyName, 0);
- keyWriter.write(keyElement);
- recordConsumer.endField(keyName, 0);
-
- // write value element
- Object valueElement = keyValue.getValue();
- if (valueElement != null) {
- recordConsumer.startField(valueName, 1);
- valueWriter.write(valueElement);
- recordConsumer.endField(valueName, 1);
+ if (mapValues != null && mapValues.size() > 0) {
+ recordConsumer.startField(repeatedGroupName, 0);
+ for (Map.Entry<?, ?> keyValue : mapValues.entrySet()) {
+ recordConsumer.startGroup();
+ if (keyValue != null) {
+ // write key element
+ Object keyElement = keyValue.getKey();
+ recordConsumer.startField(keyName, 0);
+ keyWriter.write(keyElement);
+ recordConsumer.endField(keyName, 0);
+
+ // write value element
+ Object valueElement = keyValue.getValue();
+ if (valueElement != null) {
+ recordConsumer.startField(valueName, 1);
+ valueWriter.write(valueElement);
+ recordConsumer.endField(valueName, 1);
+ }
}
+ recordConsumer.endGroup();
}
- recordConsumer.endGroup();
- }
- recordConsumer.endField(repeatedGroupName, 0);
+ recordConsumer.endField(repeatedGroupName, 0);
+ }
recordConsumer.endGroup();
}
}
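Both hunks above have the same shape; here is a minimal sketch of the list case with a stand-in Consumer interface (not Parquet's actual RecordConsumer): the enclosing group is always opened and closed, but the repeated field is started only when the collection is non-empty, so an empty array or map serializes as an empty group instead of failing the write.

    import java.util.List;

    // Stand-in for the record-consumer callbacks used by DataWritableWriter.
    class EmptyListWriteSketch {
      interface Consumer {
        void startGroup();
        void endGroup();
        void startField(String name, int index);
        void endField(String name, int index);
      }

      static void writeList(Consumer c, List<Integer> values, String repeatedGroupName) {
        c.startGroup();
        if (!values.isEmpty()) {                 // the guard both hunks introduce
          c.startField(repeatedGroupName, 0);
          for (Integer element : values) {
            c.startGroup();
            // per-element field writing elided; null elements skip their field
            c.endGroup();
          }
          c.endField(repeatedGroupName, 0);
        }
        c.endGroup();                            // empty list => just an empty group
      }
    }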
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestDataWritableWriter.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestDataWritableWriter.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestDataWritableWriter.java
index 7049139..934ae9f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestDataWritableWriter.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestDataWritableWriter.java
@@ -411,6 +411,35 @@ public class TestDataWritableWriter {
}
@Test
+ public void testEmptyArrays() throws Exception {
+ String columnNames = "arrayCol";
+ String columnTypes = "array<int>";
+
+ String fileSchema = "message hive_schema {\n"
+ + " optional group arrayCol (LIST) {\n"
+ + " repeated group array {\n"
+ + " optional int32 array_element;\n"
+ + " }\n"
+ + " }\n"
+ + "}\n";
+
+ ArrayWritable hiveRecord = createGroup(
+ new ArrayWritable(Writable.class) // Empty array
+ );
+
+ // Write record to Parquet format
+ writeParquetRecord(fileSchema, getParquetWritable(columnNames, columnTypes, hiveRecord));
+
+ // Verify record was written correctly to Parquet
+ startMessage();
+ startField("arrayCol", 0);
+ startGroup();
+ endGroup();
+ endField("arrayCol", 0);
+ endMessage();
+ }
+
+ @Test
public void testArrayOfArrays() throws Exception {
String columnNames = "array_of_arrays";
String columnTypes = "array<array<int>>";
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestAbstractParquetMapInspector.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestAbstractParquetMapInspector.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestAbstractParquetMapInspector.java
index f5d9cb4..6af8c53 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestAbstractParquetMapInspector.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestAbstractParquetMapInspector.java
@@ -62,8 +62,8 @@ public class TestAbstractParquetMapInspector extends TestCase {
@Test
public void testEmptyContainer() {
final ArrayWritable map = new ArrayWritable(ArrayWritable.class, new ArrayWritable[0]);
- assertEquals("Wrong size", -1, inspector.getMapSize(map));
- assertNull("Should be null", inspector.getMap(map));
+ assertEquals("Wrong size", 0, inspector.getMapSize(map));
+ assertNotNull("Should not be null", inspector.getMap(map));
}
@Test
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetHiveArrayInspector.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetHiveArrayInspector.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetHiveArrayInspector.java
index 0ce654d..9e0c1ff 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetHiveArrayInspector.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/TestParquetHiveArrayInspector.java
@@ -51,8 +51,8 @@ public class TestParquetHiveArrayInspector extends TestCase {
@Test
public void testEmptyContainer() {
final ArrayWritable list = new ArrayWritable(ArrayWritable.class, new ArrayWritable[0]);
- assertEquals("Wrong size", -1, inspector.getListLength(list));
- assertNull("Should be null", inspector.getList(list));
+ assertEquals("Wrong size", 0, inspector.getListLength(list));
+ assertNotNull("Should not be null", inspector.getList(list));
assertNull("Should be null", inspector.getListElement(list, 0));
}
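Both inspector tests now encode the distinction the fix relies on, roughly as follows (stand-in code, not Hive's ObjectInspector API): a null backing array still means "no value", while a present but zero-length array reports size 0 and a non-null, empty container.

    // Toy illustration of the new empty-container semantics.
    class ContainerSemanticsSketch {
      static int listLength(Object[] array) {
        if (array == null) {
          return -1;             // data genuinely absent
        }
        return array.length;     // 0 for an empty-but-present list
      }

      public static void main(String[] args) {
        System.out.println(listLength(null));           // -1
        System.out.println(listLength(new Object[0]));  // 0 (was -1 before the fix)
      }
    }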
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/test/queries/clientpositive/parquet_array_map_emptynullvals.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_array_map_emptynullvals.q b/ql/src/test/queries/clientpositive/parquet_array_map_emptynullvals.q
new file mode 100644
index 0000000..eeae5cf
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/parquet_array_map_emptynullvals.q
@@ -0,0 +1,20 @@
+drop table if exists testSets;
+drop table if exists testSets2;
+create table testSets (
+key string,
+arrayValues array<string>,
+mapValues map<string,string>)
+stored as parquet;
+
+insert into table testSets select 'abcd', array(), map() from src limit 1;
+
+create table testSets2 (
+key string,
+arrayValues array<string>,
+mapValues map<string,string>)
+stored as parquet;
+insert into table testSets2 select * from testSets;
+select * from testSets2;
+drop table testSets;
+drop table testSets2;
+
http://git-wip-us.apache.org/repos/asf/hive/blob/96f2dc72/ql/src/test/results/clientpositive/parquet_array_map_emptynullvals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_array_map_emptynullvals.q.out b/ql/src/test/results/clientpositive/parquet_array_map_emptynullvals.q.out
new file mode 100644
index 0000000..4608607
--- /dev/null
+++ b/ql/src/test/results/clientpositive/parquet_array_map_emptynullvals.q.out
@@ -0,0 +1,87 @@
+PREHOOK: query: drop table if exists testSets
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists testSets
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists testSets2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists testSets2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table testSets (
+key string,
+arrayValues array<string>,
+mapValues map<string,string>)
+stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testSets
+POSTHOOK: query: create table testSets (
+key string,
+arrayValues array<string>,
+mapValues map<string,string>)
+stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testSets
+PREHOOK: query: insert into table testSets select 'abcd', array(), map() from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@testsets
+POSTHOOK: query: insert into table testSets select 'abcd', array(), map() from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@testsets
+POSTHOOK: Lineage: testsets.arrayvalues EXPRESSION []
+POSTHOOK: Lineage: testsets.key SIMPLE []
+POSTHOOK: Lineage: testsets.mapvalues EXPRESSION []
+PREHOOK: query: create table testSets2 (
+key string,
+arrayValues array<string>,
+mapValues map<string,string>)
+stored as parquet
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testSets2
+POSTHOOK: query: create table testSets2 (
+key string,
+arrayValues array<string>,
+mapValues map<string,string>)
+stored as parquet
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testSets2
+PREHOOK: query: insert into table testSets2 select * from testSets
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testsets
+PREHOOK: Output: default@testsets2
+POSTHOOK: query: insert into table testSets2 select * from testSets
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testsets
+POSTHOOK: Output: default@testsets2
+POSTHOOK: Lineage: testsets2.arrayvalues SIMPLE [(testsets)testsets.FieldSchema(name:arrayvalues, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: testsets2.key SIMPLE [(testsets)testsets.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: testsets2.mapvalues SIMPLE [(testsets)testsets.FieldSchema(name:mapvalues, type:map<string,string>, comment:null), ]
+PREHOOK: query: select * from testSets2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testsets2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from testSets2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testsets2
+#### A masked pattern was here ####
+abcd [] {}
+PREHOOK: query: drop table testSets
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testsets
+PREHOOK: Output: default@testsets
+POSTHOOK: query: drop table testSets
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testsets
+POSTHOOK: Output: default@testsets
+PREHOOK: query: drop table testSets2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testsets2
+PREHOOK: Output: default@testsets2
+POSTHOOK: query: drop table testSets2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testsets2
+POSTHOOK: Output: default@testsets2
[19/20] hive git commit: HIVE-13639: CBO rule to pull up constants
through Union (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
HIVE-13639: CBO rule to pull up constants through Union (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/09271872
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/09271872
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/09271872
Branch: refs/heads/llap
Commit: 092718720a4abc77ce74c2efcf42cfef0243e9d4
Parents: f41d693
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed May 4 22:01:52 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu May 5 20:21:50 2016 +0100
----------------------------------------------------------------------
.../rules/HiveUnionPullUpConstantsRule.java | 133 ++++
.../hadoop/hive/ql/parse/CalcitePlanner.java | 2 +
.../queries/clientpositive/cbo_union_view.q | 19 +
.../results/clientpositive/cbo_input26.q.out | 64 +-
.../results/clientpositive/cbo_union_view.q.out | 228 ++++++
.../results/clientpositive/groupby_ppd.q.out | 28 +-
.../results/clientpositive/perf/query66.q.out | 328 ++++-----
.../results/clientpositive/perf/query75.q.out | 692 ++++++++++---------
.../clientpositive/spark/union_remove_25.q.out | 48 +-
.../clientpositive/spark/union_view.q.out | 60 +-
.../results/clientpositive/union_view.q.out | 60 +-
11 files changed, 1021 insertions(+), 641 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
new file mode 100644
index 0000000..3155cb1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.calcite.plan.RelOptPredicateList;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.core.Union;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.calcite.tools.RelBuilderFactory;
+import org.apache.calcite.util.Pair;
+import org.apache.calcite.util.mapping.Mappings;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Planner rule that pulls up constants through a Union operator.
+ */
+public class HiveUnionPullUpConstantsRule extends RelOptRule {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(HiveUnionPullUpConstantsRule.class);
+
+
+ public static final HiveUnionPullUpConstantsRule INSTANCE =
+ new HiveUnionPullUpConstantsRule(HiveUnion.class,
+ HiveRelFactories.HIVE_BUILDER);
+
+ private HiveUnionPullUpConstantsRule(
+ Class<? extends Union> unionClass,
+ RelBuilderFactory relBuilderFactory) {
+ super(operand(unionClass, any()),
+ relBuilderFactory, null);
+ }
+
+ @Override
+ public void onMatch(RelOptRuleCall call) {
+ final Union union = call.rel(0);
+
+ final int count = union.getRowType().getFieldCount();
+ if (count == 1) {
+ // No room for optimization since we cannot create an empty
+ // Project operator.
+ return;
+ }
+
+ final RexBuilder rexBuilder = union.getCluster().getRexBuilder();
+ final RelMetadataQuery mq = RelMetadataQuery.instance();
+ final RelOptPredicateList predicates = mq.getPulledUpPredicates(union);
+ if (predicates == null) {
+ return;
+ }
+
+ Map<RexNode, RexNode> constants = HiveReduceExpressionsRule.predicateConstants(
+ RexNode.class, rexBuilder, predicates);
+
+ // None of the expressions are constant. Nothing to do.
+ if (constants.isEmpty()) {
+ return;
+ }
+
+ if (count == constants.size()) {
+ // At least a single item in project is required.
+ final Map<RexNode, RexNode> map = new HashMap<>(constants);
+ map.remove(map.keySet().iterator().next());
+ constants = map;
+ }
+
+ // Create expressions for Project operators before and after the Union
+ List<RelDataTypeField> fields = union.getRowType().getFieldList();
+ List<Pair<RexNode, String>> newChildExprs = new ArrayList<>();
+ List<RexNode> topChildExprs = new ArrayList<>();
+ List<String> topChildExprsFields = new ArrayList<>();
+ for (int i = 0; i < count ; i++) {
+ RexNode expr = rexBuilder.makeInputRef(union, i);
+ RelDataTypeField field = fields.get(i);
+ if (constants.containsKey(expr)) {
+ topChildExprs.add(constants.get(expr));
+ topChildExprsFields.add(field.getName());
+ } else {
+ newChildExprs.add(Pair.<RexNode,String>of(expr, field.getName()));
+ topChildExprs.add(expr);
+ topChildExprsFields.add(field.getName());
+ }
+ }
+
+ // Update top Project positions
+ final Mappings.TargetMapping mapping =
+ RelOptUtil.permutation(Pair.left(newChildExprs), union.getInput(0).getRowType()).inverse();
+ topChildExprs = ImmutableList.copyOf(RexUtil.apply(mapping, topChildExprs));
+
+ // Create new Project-Union-Project sequences
+ final RelBuilder relBuilder = call.builder();
+ for (int i = 0; i < union.getInputs().size() ; i++) {
+ relBuilder.push(union.getInput(i));
+ relBuilder.project(Pair.left(newChildExprs), Pair.right(newChildExprs));
+ }
+ relBuilder.union(union.all, union.getInputs().size());
+ relBuilder.project(topChildExprs, topChildExprsFields);
+
+ call.transformTo(relBuilder.build());
+ }
+
+}
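As a toy illustration of what onMatch computes (plain Java stand-ins, not Calcite RexNodes): columns pinned to a constant by the pulled-up predicates are dropped from the projections pushed below the Union and re-materialized once in the Project placed on top, which is the Project-Union-Project sequence the RelBuilder assembles at the end.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    // Partitioning step of the pull-up, on strings instead of RexNodes.
    class PullUpConstantsSketch {
      public static void main(String[] args) {
        List<String> fields = Arrays.asList("key", "value", "ds");
        Map<String, String> constants = Map.of("ds", "'1'"); // from pulled-up predicates

        List<String> belowUnion = new ArrayList<>(); // columns each branch still projects
        List<String> aboveUnion = new ArrayList<>(); // final projection above the union
        for (String field : fields) {
          if (constants.containsKey(field)) {
            aboveUnion.add(constants.get(field) + " AS " + field); // re-added constant
          } else {
            belowUnion.add(field);
            aboveUnion.add(field);
          }
        }
        System.out.println("below union: " + belowUnion); // [key, value]
        System.out.println("above union: " + aboveUnion); // [key, value, '1' AS ds]
      }
    }

The two early returns in the rule guard the degenerate cases: a single-column row type leaves no room for a smaller Project below the Union, and when every column is constant one entry is kept so the lower Project is never empty.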
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 377573b..de6a053 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -154,6 +154,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinProjectTranspos
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinPushTransitivePredicatesRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinToMultiJoinRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortLimitPullUpConstantsRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePartitionPruneRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePointLookupOptimizerRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePreFilteringRule;
@@ -1166,6 +1167,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
rules.add(HiveJoinPushTransitivePredicatesRule.INSTANCE_SEMIJOIN);
rules.add(HiveSortMergeRule.INSTANCE);
rules.add(HiveSortLimitPullUpConstantsRule.INSTANCE);
+ rules.add(HiveUnionPullUpConstantsRule.INSTANCE);
perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
basePlan = hepPlan(basePlan, true, mdProvider, executorProvider, HepMatchOrder.BOTTOM_UP,
rules.toArray(new RelOptRule[rules.size()]));
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/queries/clientpositive/cbo_union_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_union_view.q b/ql/src/test/queries/clientpositive/cbo_union_view.q
new file mode 100644
index 0000000..d889b1d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_union_view.q
@@ -0,0 +1,19 @@
+set hive.mapred.mode=nonstrict;
+set hive.optimize.constant.propagation=false;
+
+CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string);
+CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string);
+CREATE TABLE src_union_3 (key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string);
+
+CREATE VIEW src_union_view PARTITIONED ON (ds) as
+SELECT key, value, ds FROM (
+SELECT key, value, ds FROM src_union_1
+UNION ALL
+SELECT key, value, ds FROM src_union_2
+UNION ALL
+SELECT key, value, ds FROM src_union_3
+) subq;
+
+EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86;
+
+EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='1';
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/cbo_input26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_input26.q.out b/ql/src/test/results/clientpositive/cbo_input26.q.out
index 5c4c771..c9ed67a 100644
--- a/ql/src/test/results/clientpositive/cbo_input26.q.out
+++ b/ql/src/test/results/clientpositive/cbo_input26.q.out
@@ -490,16 +490,12 @@ STAGE PLANS:
Limit
Number of rows: 5
Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), '2008-04-08' (type: string), _col1 (type: string)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-2
Map Reduce
@@ -507,23 +503,31 @@ STAGE PLANS:
TableScan
Union
Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
+ Select Operator
+ expressions: _col0 (type: string), '2008-04-08' (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
TableScan
Union
Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
+ Select Operator
+ expressions: _col0 (type: string), '2008-04-08' (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-3
Map Reduce
@@ -554,16 +558,12 @@ STAGE PLANS:
Limit
Number of rows: 5
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), '2008-04-08' (type: string), _col1 (type: string)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Fetch Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/cbo_union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_union_view.q.out b/ql/src/test/results/clientpositive/cbo_union_view.q.out
new file mode 100644
index 0000000..ed6bba9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_union_view.q.out
@@ -0,0 +1,228 @@
+PREHOOK: query: CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_union_1
+POSTHOOK: query: CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_union_1
+PREHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_union_2
+POSTHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_union_2
+PREHOOK: query: CREATE TABLE src_union_3 (key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_union_3
+POSTHOOK: query: CREATE TABLE src_union_3 (key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_union_3
+PREHOOK: query: CREATE VIEW src_union_view PARTITIONED ON (ds) as
+SELECT key, value, ds FROM (
+SELECT key, value, ds FROM src_union_1
+UNION ALL
+SELECT key, value, ds FROM src_union_2
+UNION ALL
+SELECT key, value, ds FROM src_union_3
+) subq
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_union_1
+PREHOOK: Input: default@src_union_2
+PREHOOK: Input: default@src_union_3
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_union_view
+POSTHOOK: query: CREATE VIEW src_union_view PARTITIONED ON (ds) as
+SELECT key, value, ds FROM (
+SELECT key, value, ds FROM src_union_1
+UNION ALL
+SELECT key, value, ds FROM src_union_2
+UNION ALL
+SELECT key, value, ds FROM src_union_3
+) subq
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_union_1
+POSTHOOK: Input: default@src_union_2
+POSTHOOK: Input: default@src_union_3
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_union_view
+PREHOOK: query: EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src_union_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: (key = 86) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: value (type: string), ds (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Union
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ alias: src_union_2
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: (key = 86) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: value (type: string), ds (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Union
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ alias: src_union_3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: (key = 86) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: value (type: string), ds (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Union
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='1'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src_union_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((key = 86) and (ds = '1')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Union
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ alias: src_union_2
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((key = 86) and (ds = '1')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Union
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ alias: src_union_3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((key = 86) and (ds = '1')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Union
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/groupby_ppd.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out
index c63acd3..515f62e 100644
--- a/ql/src/test/results/clientpositive/groupby_ppd.q.out
+++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out
@@ -28,23 +28,23 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: foo (type: int)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 1 (type: int), _col1 (type: int)
- outputColumnNames: _col0, _col1
+ expressions: _col0 (type: int)
+ outputColumnNames: _col1
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
- keys: _col0 (type: int), _col1 (type: int)
+ keys: 1 (type: int), _col1 (type: int)
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type: int)
+ key expressions: 1 (type: int), _col1 (type: int)
sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+ Map-reduce partition columns: 1 (type: int), _col1 (type: int)
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
TableScan
alias: c
@@ -54,32 +54,32 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: foo (type: int)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: 1 (type: int), _col1 (type: int)
- outputColumnNames: _col0, _col1
+ expressions: _col0 (type: int)
+ outputColumnNames: _col1
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
- keys: _col0 (type: int), _col1 (type: int)
+ keys: 1 (type: int), _col1 (type: int)
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type: int)
+ key expressions: 1 (type: int), _col1 (type: int)
sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+ Map-reduce partition columns: 1 (type: int), _col1 (type: int)
Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Operator Tree:
Group By Operator
- keys: KEY._col0 (type: int), KEY._col1 (type: int)
+ keys: 1 (type: int), KEY._col1 (type: int)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: _col1 (type: int), _col0 (type: int)
+ expressions: _col1 (type: int), 1 (type: int)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
File Output Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/perf/query66.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query66.q.out b/ql/src/test/results/clientpositive/perf/query66.q.out
index d698602..a606946 100644
--- a/ql/src/test/results/clientpositive/perf/query66.q.out
+++ b/ql/src/test/results/clientpositive/perf/query66.q.out
@@ -464,167 +464,169 @@ Stage-0
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
<-Reducer 8 [SIMPLE_EDGE]
SHUFFLE [RS_73]
- Group By Operator [GBY_71] (rows=26136 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)","sum(VALUE._col24)","sum(VALUE._col25)","sum(VALUE._col26)","sum(VALUE._col27)","sum(VALUE._col28)","s
um(VALUE._col29)","sum(VALUE._col30)","sum(VALUE._col31)","sum(VALUE._col32)","sum(VALUE._col33)","sum(VALUE._col34)","sum(VALUE._col35)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7
- <-Union 7 [SIMPLE_EDGE]
- <-Reducer 19 [CONTAINS]
- Reduce Output Operator [RS_70]
- PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
- Group By Operator [GBY_69] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)","sum(_col42)","sum(_col43)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
- Select Operator [SEL_67] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
- Select Operator [SEL_65] (rows=26136 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31"]
- Group By Operator [GBY_64] (rows=26136 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 2002
- <-Reducer 18 [SIMPLE_EDGE]
- SHUFFLE [RS_63]
- PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 2002
- Group By Operator [GBY_62] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 2002
- Select Operator [SEL_60] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"]
- Merge Join Operator [MERGEJOIN_122] (rows=52272 width=471)
- Conds:RS_57._col2=RS_58._col0(Inner),Output:["_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
- <-Map 23 [SIMPLE_EDGE]
- SHUFFLE [RS_58]
- PartitionCols:_col0
- Select Operator [SEL_47] (rows=1 width=0)
- Output:["_col0"]
- Filter Operator [FIL_114] (rows=1 width=0)
- predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
- TableScan [TS_45] (rows=1 width=0)
- default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
- <-Reducer 17 [SIMPLE_EDGE]
- SHUFFLE [RS_57]
- PartitionCols:_col2
- Merge Join Operator [MERGEJOIN_121] (rows=47520 width=471)
- Conds:RS_54._col1=RS_55._col0(Inner),Output:["_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
- <-Map 22 [SIMPLE_EDGE]
- SHUFFLE [RS_55]
- PartitionCols:_col0
- Select Operator [SEL_44] (rows=43200 width=471)
- Output:["_col0"]
- Filter Operator [FIL_113] (rows=43200 width=471)
- predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
- TableScan [TS_42] (rows=86400 width=471)
- default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
- <-Reducer 16 [SIMPLE_EDGE]
- SHUFFLE [RS_54]
- PartitionCols:_col1
- Merge Join Operator [MERGEJOIN_120] (rows=40176 width=1119)
- Conds:RS_51._col0=RS_52._col0(Inner),Output:["_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
- <-Map 21 [SIMPLE_EDGE]
- SHUFFLE [RS_52]
- PartitionCols:_col0
- Select Operator [SEL_41] (rows=36524 width=1119)
- Output:["_col0","_col2"]
- Filter Operator [FIL_112] (rows=36524 width=1119)
- predicate:((d_year = 2002) and d_date_sk is not null)
- TableScan [TS_39] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
- <-Reducer 15 [SIMPLE_EDGE]
- SHUFFLE [RS_51]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_119] (rows=29 width=1054)
- Conds:RS_48._col3=RS_49._col0(Inner),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13"]
- <-Map 14 [SIMPLE_EDGE]
- SHUFFLE [RS_48]
- PartitionCols:_col3
- Select Operator [SEL_35] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Filter Operator [FIL_110] (rows=1 width=0)
- predicate:(cs_warehouse_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_ship_mode_sk is not null)
- TableScan [TS_33] (rows=1 width=0)
- default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
- <-Map 20 [SIMPLE_EDGE]
- SHUFFLE [RS_49]
- PartitionCols:_col0
- Select Operator [SEL_38] (rows=27 width=1029)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Filter Operator [FIL_111] (rows=27 width=1029)
- predicate:w_warehouse_sk is not null
- TableScan [TS_36] (rows=27 width=1029)
- default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
- <-Reducer 6 [CONTAINS]
- Reduce Output Operator [RS_70]
- PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
- Group By Operator [GBY_69] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)","sum(_col42)","sum(_col43)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
- Select Operator [SEL_67] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
- Select Operator [SEL_32] (rows=26136 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31"]
- Group By Operator [GBY_31] (rows=26136 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 2002
- <-Reducer 5 [SIMPLE_EDGE]
- SHUFFLE [RS_30]
- PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 2002
- Group By Operator [GBY_29] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 2002
- Select Operator [SEL_27] (rows=52272 width=471)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"]
- Merge Join Operator [MERGEJOIN_118] (rows=52272 width=471)
- Conds:RS_24._col2=RS_25._col0(Inner),Output:["_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
- <-Map 13 [SIMPLE_EDGE]
- SHUFFLE [RS_25]
- PartitionCols:_col0
- Select Operator [SEL_14] (rows=1 width=0)
- Output:["_col0"]
- Filter Operator [FIL_109] (rows=1 width=0)
- predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
- TableScan [TS_12] (rows=1 width=0)
- default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
- <-Reducer 4 [SIMPLE_EDGE]
- SHUFFLE [RS_24]
- PartitionCols:_col2
- Merge Join Operator [MERGEJOIN_117] (rows=47520 width=471)
- Conds:RS_21._col1=RS_22._col0(Inner),Output:["_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
- <-Map 12 [SIMPLE_EDGE]
- SHUFFLE [RS_22]
- PartitionCols:_col0
- Select Operator [SEL_11] (rows=43200 width=471)
- Output:["_col0"]
- Filter Operator [FIL_108] (rows=43200 width=471)
- predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
- TableScan [TS_9] (rows=86400 width=471)
- default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
- <-Reducer 3 [SIMPLE_EDGE]
- SHUFFLE [RS_21]
- PartitionCols:_col1
- Merge Join Operator [MERGEJOIN_116] (rows=40176 width=1119)
- Conds:RS_18._col0=RS_19._col0(Inner),Output:["_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
- <-Map 11 [SIMPLE_EDGE]
- SHUFFLE [RS_19]
- PartitionCols:_col0
- Select Operator [SEL_8] (rows=36524 width=1119)
- Output:["_col0","_col2"]
- Filter Operator [FIL_107] (rows=36524 width=1119)
- predicate:((d_year = 2002) and d_date_sk is not null)
- TableScan [TS_6] (rows=73049 width=1119)
- default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
- <-Reducer 2 [SIMPLE_EDGE]
- SHUFFLE [RS_18]
- PartitionCols:_col0
- Merge Join Operator [MERGEJOIN_115] (rows=29 width=1054)
- Conds:RS_15._col3=RS_16._col0(Inner),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13"]
- <-Map 1 [SIMPLE_EDGE]
- SHUFFLE [RS_15]
- PartitionCols:_col3
- Select Operator [SEL_2] (rows=1 width=0)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Filter Operator [FIL_105] (rows=1 width=0)
- predicate:(ws_warehouse_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_ship_mode_sk is not null)
- TableScan [TS_0] (rows=1 width=0)
- default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
- <-Map 10 [SIMPLE_EDGE]
- SHUFFLE [RS_16]
- PartitionCols:_col0
- Select Operator [SEL_5] (rows=27 width=1029)
- Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
- Filter Operator [FIL_106] (rows=27 width=1029)
- predicate:w_warehouse_sk is not null
- TableScan [TS_3] (rows=27 width=1029)
- default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
+ Select Operator [SEL_72] (rows=26136 width=471)
+ Output:["_col0","_col1","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col2","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col3","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col4","_col40","_col41","_col42","_col43","_col5","_col8","_col9"]
+ Group By Operator [GBY_71] (rows=26136 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)","sum(VALUE._col24)","sum(VALUE._col25)","sum(VALUE._col26)","sum(VALUE._col27)","sum(VALUE._col28)",
"sum(VALUE._col29)","sum(VALUE._col30)","sum(VALUE._col31)","sum(VALUE._col32)","sum(VALUE._col33)","sum(VALUE._col34)","sum(VALUE._col35)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 'DIAMOND,AIRBORNE', 2002
+ <-Union 7 [SIMPLE_EDGE]
+ <-Reducer 19 [CONTAINS]
+ Reduce Output Operator [RS_70]
+ PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBORNE', 2002
+ Group By Operator [GBY_69] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)","sum(_col42)","sum(_col43)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBO
RNE', 2002
+ Select Operator [SEL_67] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
+ Select Operator [SEL_65] (rows=26136 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
+ Group By Operator [GBY_64] (rows=26136 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 2002
+ <-Reducer 18 [SIMPLE_EDGE]
+ SHUFFLE [RS_63]
+ PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 2002
+ Group By Operator [GBY_62] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 2002
+ Select Operator [SEL_60] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"]
+ Merge Join Operator [MERGEJOIN_122] (rows=52272 width=471)
+ Conds:RS_57._col2=RS_58._col0(Inner),Output:["_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
+ <-Map 23 [SIMPLE_EDGE]
+ SHUFFLE [RS_58]
+ PartitionCols:_col0
+ Select Operator [SEL_47] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_114] (rows=1 width=0)
+ predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
+ TableScan [TS_45] (rows=1 width=0)
+ default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
+ <-Reducer 17 [SIMPLE_EDGE]
+ SHUFFLE [RS_57]
+ PartitionCols:_col2
+ Merge Join Operator [MERGEJOIN_121] (rows=47520 width=471)
+ Conds:RS_54._col1=RS_55._col0(Inner),Output:["_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
+ <-Map 22 [SIMPLE_EDGE]
+ SHUFFLE [RS_55]
+ PartitionCols:_col0
+ Select Operator [SEL_44] (rows=43200 width=471)
+ Output:["_col0"]
+ Filter Operator [FIL_113] (rows=43200 width=471)
+ predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
+ TableScan [TS_42] (rows=86400 width=471)
+ default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
+ <-Reducer 16 [SIMPLE_EDGE]
+ SHUFFLE [RS_54]
+ PartitionCols:_col1
+ Merge Join Operator [MERGEJOIN_120] (rows=40176 width=1119)
+ Conds:RS_51._col0=RS_52._col0(Inner),Output:["_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
+ <-Map 21 [SIMPLE_EDGE]
+ SHUFFLE [RS_52]
+ PartitionCols:_col0
+ Select Operator [SEL_41] (rows=36524 width=1119)
+ Output:["_col0","_col2"]
+ Filter Operator [FIL_112] (rows=36524 width=1119)
+ predicate:((d_year = 2002) and d_date_sk is not null)
+ TableScan [TS_39] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+ <-Reducer 15 [SIMPLE_EDGE]
+ SHUFFLE [RS_51]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_119] (rows=29 width=1054)
+ Conds:RS_48._col3=RS_49._col0(Inner),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13"]
+ <-Map 14 [SIMPLE_EDGE]
+ SHUFFLE [RS_48]
+ PartitionCols:_col3
+ Select Operator [SEL_35] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+ Filter Operator [FIL_110] (rows=1 width=0)
+ predicate:(cs_warehouse_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_ship_mode_sk is not null)
+ TableScan [TS_33] (rows=1 width=0)
+ default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
+ <-Map 20 [SIMPLE_EDGE]
+ SHUFFLE [RS_49]
+ PartitionCols:_col0
+ Select Operator [SEL_38] (rows=27 width=1029)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+ Filter Operator [FIL_111] (rows=27 width=1029)
+ predicate:w_warehouse_sk is not null
+ TableScan [TS_36] (rows=27 width=1029)
+ default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
+ <-Reducer 6 [CONTAINS]
+ Reduce Output Operator [RS_70]
+ PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBORNE', 2002
+ Group By Operator [GBY_69] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)","sum(_col42)","sum(_col43)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBO
RNE', 2002
+ Select Operator [SEL_67] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
+ Select Operator [SEL_32] (rows=26136 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
+ Group By Operator [GBY_31] (rows=26136 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 2002
+ <-Reducer 5 [SIMPLE_EDGE]
+ SHUFFLE [RS_30]
+ PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 2002
+ Group By Operator [GBY_29] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 2002
+ Select Operator [SEL_27] (rows=52272 width=471)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"]
+ Merge Join Operator [MERGEJOIN_118] (rows=52272 width=471)
+ Conds:RS_24._col2=RS_25._col0(Inner),Output:["_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
+ <-Map 13 [SIMPLE_EDGE]
+ SHUFFLE [RS_25]
+ PartitionCols:_col0
+ Select Operator [SEL_14] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_109] (rows=1 width=0)
+ predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
+ TableScan [TS_12] (rows=1 width=0)
+ default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
+ <-Reducer 4 [SIMPLE_EDGE]
+ SHUFFLE [RS_24]
+ PartitionCols:_col2
+ Merge Join Operator [MERGEJOIN_117] (rows=47520 width=471)
+ Conds:RS_21._col1=RS_22._col0(Inner),Output:["_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
+ <-Map 12 [SIMPLE_EDGE]
+ SHUFFLE [RS_22]
+ PartitionCols:_col0
+ Select Operator [SEL_11] (rows=43200 width=471)
+ Output:["_col0"]
+ Filter Operator [FIL_108] (rows=43200 width=471)
+ predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
+ TableScan [TS_9] (rows=86400 width=471)
+ default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
+ <-Reducer 3 [SIMPLE_EDGE]
+ SHUFFLE [RS_21]
+ PartitionCols:_col1
+ Merge Join Operator [MERGEJOIN_116] (rows=40176 width=1119)
+ Conds:RS_18._col0=RS_19._col0(Inner),Output:["_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col16"]
+ <-Map 11 [SIMPLE_EDGE]
+ SHUFFLE [RS_19]
+ PartitionCols:_col0
+ Select Operator [SEL_8] (rows=36524 width=1119)
+ Output:["_col0","_col2"]
+ Filter Operator [FIL_107] (rows=36524 width=1119)
+ predicate:((d_year = 2002) and d_date_sk is not null)
+ TableScan [TS_6] (rows=73049 width=1119)
+ default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+ <-Reducer 2 [SIMPLE_EDGE]
+ SHUFFLE [RS_18]
+ PartitionCols:_col0
+ Merge Join Operator [MERGEJOIN_115] (rows=29 width=1054)
+ Conds:RS_15._col3=RS_16._col0(Inner),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col13"]
+ <-Map 1 [SIMPLE_EDGE]
+ SHUFFLE [RS_15]
+ PartitionCols:_col3
+ Select Operator [SEL_2] (rows=1 width=0)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+ Filter Operator [FIL_105] (rows=1 width=0)
+ predicate:(ws_warehouse_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_ship_mode_sk is not null)
+ TableScan [TS_0] (rows=1 width=0)
+ default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
+ <-Map 10 [SIMPLE_EDGE]
+ SHUFFLE [RS_16]
+ PartitionCols:_col0
+ Select Operator [SEL_5] (rows=27 width=1029)
+ Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+ Filter Operator [FIL_106] (rows=27 width=1029)
+ predicate:w_warehouse_sk is not null
+ TableScan [TS_3] (rows=27 width=1029)
+ default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
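Both plan diffs above show the same rewrite: group-by keys that are known constants (the literal 1 in groupby_ppd.q.out; 'DIAMOND,AIRBORNE' and 2002 in query66.q.out) are no longer projected as columns below the Union, and the literals are instead re-inserted wherever the key list is consumed. A toy sketch of that substitution, in plain Java with strings standing in for plan expressions (illustrative only, not Hive or Calcite code):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ConstantKeyPullUp {
  // Replaces any key whose value is known to be constant with the literal
  // itself, so the child operator no longer needs to project that column.
  static List<String> substituteConstants(List<String> keys, Map<String, String> constants) {
    List<String> rewritten = new ArrayList<>();
    for (String key : keys) {
      rewritten.add(constants.getOrDefault(key, key));
    }
    return rewritten;
  }

  public static void main(String[] args) {
    // _col0 is known to equal the literal 1, as in the groupby_ppd diff above.
    System.out.println(substituteConstants(
        List.of("_col0", "_col1"), Map.of("_col0", "1"))); // prints [1, _col1]
  }
}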
[16/20] hive git commit: HIVE-13653: improve config error messages for LLAP cache size/etc (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Posted by jd...@apache.org.
HIVE-13653: improve config error messages for LLAP cache size/etc (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f41d693b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f41d693b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f41d693b
Branch: refs/heads/llap
Commit: f41d693b5b984ea55b01394af0dbb6c7121db90a
Parents: 96f2dc7
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu May 5 10:41:47 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu May 5 10:41:47 2016 -0700
----------------------------------------------------------------------
.../hadoop/hive/llap/cache/BuddyAllocator.java | 43 +++++++++++++++-----
1 file changed, 32 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/f41d693b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index d78c1e0..1d5a7db 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -44,6 +44,8 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
// We don't know the acceptable size for Java array, so we'll use 1Gb boundary.
// That is guaranteed to fit any maximum allocation.
private static final int MAX_ARENA_SIZE = 1024*1024*1024;
+ // Don't try to operate with less than MIN_TOTAL_MEMORY_SIZE of allocator space; it will just give you grief.
+ private static final int MIN_TOTAL_MEMORY_SIZE = 64*1024*1024;
public BuddyAllocator(Configuration conf, MemoryManager mm, LlapDaemonCacheMetrics metrics) {
@@ -51,8 +53,19 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
(int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
(int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MAX_ALLOC),
HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_ARENA_COUNT),
- HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE),
- mm, metrics);
+ getMaxTotalMemorySize(conf), mm, metrics);
+ }
+
+ private static long getMaxTotalMemorySize(Configuration conf) {
+ long maxSize = HiveConf.getSizeVar(conf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
+ if (maxSize > MIN_TOTAL_MEMORY_SIZE || HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
+ return maxSize;
+ }
+ throw new RuntimeException("Allocator space is too small for reasonable operation; "
+ + ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname + "=" + maxSize + ", but at least "
+ + MIN_TOTAL_MEMORY_SIZE + " is required. If you cannot spare any memory, you can "
+ + "disable LLAP IO entirely via " + ConfVars.LLAP_IO_ENABLED.varname + "; or set "
+ + ConfVars.LLAP_IO_MEMORY_MODE.varname + " to 'none'");
}
@VisibleForTesting
@@ -69,16 +82,19 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
+ ", arena size " + arenaSizeVal + ". total size " + maxSizeVal);
}
+ String minName = ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname,
+ maxName = ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname;
if (minAllocation < 8) {
- throw new AssertionError("Min allocation must be at least 8 bytes: " + minAllocation);
+ throw new RuntimeException(minName + " must be at least 8 bytes: " + minAllocation);
}
- if (maxSizeVal < arenaSizeVal || maxAllocation < minAllocation) {
- throw new AssertionError("Inconsistent sizes of cache, arena and allocations: "
- + minAllocation + ", " + maxAllocation + ", " + arenaSizeVal + ", " + maxSizeVal);
+ if (maxSizeVal < maxAllocation || maxAllocation < minAllocation) {
+ throw new RuntimeException("Inconsistent sizes; expecting " + minName + " <= " + maxName
+ + " <= " + ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname + "; configured with min="
+ + minAllocation + ", max=" + maxAllocation + " and total=" + maxSizeVal);
}
if ((Integer.bitCount(minAllocation) != 1) || (Integer.bitCount(maxAllocation) != 1)) {
- throw new AssertionError("Allocation sizes must be powers of two: "
- + minAllocation + ", " + maxAllocation);
+ throw new RuntimeException("Allocation sizes must be powers of two; configured with "
+ + minName + "=" + minAllocation + ", " + maxName + "=" + maxAllocation);
}
if ((arenaSizeVal % maxAllocation) > 0) {
long oldArenaSize = arenaSizeVal;
@@ -94,8 +110,8 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
+ " to be divisible by arena size " + arenaSize);
}
if ((maxSizeVal / arenaSize) > Integer.MAX_VALUE) {
- throw new AssertionError(
- "Too many arenas needed to allocate the cache: " + arenaSize + "," + maxSizeVal);
+ throw new RuntimeException(
+ "Too many arenas needed to allocate the cache: " + arenaSize + ", " + maxSizeVal);
}
maxSize = maxSizeVal;
memoryManager.updateMaxSize(maxSize);
@@ -280,7 +296,12 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
private FreeList[] freeLists;
void init() {
- data = isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
+ try {
+ data = isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
+ } catch (OutOfMemoryError oom) {
+ throw new OutOfMemoryError("Cannot allocate " + arenaSize + " bytes: " + oom.getMessage()
+ + "; make sure your xmx and process size are set correctly.");
+ }
int maxMinAllocs = 1 << (arenaSizeLog2 - minAllocLog2);
headers = new byte[maxMinAllocs];
int allocLog2Diff = maxAllocLog2 - minAllocLog2, freeListCount = allocLog2Diff + 1;
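The thrust of the patch above is replacing bare AssertionError throws with RuntimeExceptions that name the offending configuration keys and values, plus a floor on total allocator memory. A minimal standalone sketch of the same validation shape (the key names below are stand-ins, not the real HiveConf variables):

public class AllocatorConfigCheck {
  static void validate(int minAlloc, int maxAlloc, long totalSize) {
    // Hypothetical key names, used only to make the error text actionable.
    String minName = "llap.allocator.alloc.min";
    String maxName = "llap.allocator.alloc.max";
    if (minAlloc < 8) {
      throw new RuntimeException(minName + " must be at least 8 bytes: " + minAlloc);
    }
    if (totalSize < maxAlloc || maxAlloc < minAlloc) {
      throw new RuntimeException("Inconsistent sizes; expecting " + minName + " <= "
          + maxName + " <= total size; configured with min=" + minAlloc
          + ", max=" + maxAlloc + " and total=" + totalSize);
    }
    if (Integer.bitCount(minAlloc) != 1 || Integer.bitCount(maxAlloc) != 1) {
      // Power-of-two sizes are what make buddy splitting/merging work.
      throw new RuntimeException("Allocation sizes must be powers of two; configured with "
          + minName + "=" + minAlloc + ", " + maxName + "=" + maxAlloc);
    }
  }

  public static void main(String[] args) {
    validate(8, 16 * 1024 * 1024, 256L * 1024 * 1024); // passes
    try {
      validate(12, 1024, 4096); // fails: 12 is not a power of two
    } catch (RuntimeException e) {
      System.out.println(e.getMessage());
    }
  }
}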
[09/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/212077b8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/212077b8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/212077b8
Branch: refs/heads/llap
Commit: 212077b8ae4aed130d8fea38febfc86c2bc55bbb
Parents: b04dc95
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Wed May 4 12:26:38 2016 -0700
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Wed May 4 12:26:38 2016 -0700
----------------------------------------------------------------------
metastore/if/hive_metastore.thrift | 8 +
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2431 ++++++++++--------
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 133 +
.../ThriftHiveMetastore_server.skeleton.cpp | 5 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 2180 ++++++++--------
.../gen/thrift/gen-cpp/hive_metastore_types.h | 52 +
.../metastore/api/DropConstraintRequest.java | 591 +++++
.../hive/metastore/api/ThriftHiveMetastore.java | 1966 ++++++++++----
.../gen-php/metastore/ThriftHiveMetastore.php | 242 ++
.../src/gen/thrift/gen-php/metastore/Types.php | 121 +
.../hive_metastore/ThriftHiveMetastore-remote | 7 +
.../hive_metastore/ThriftHiveMetastore.py | 212 ++
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 97 +
.../gen/thrift/gen-rb/hive_metastore_types.rb | 23 +
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 63 +
.../hadoop/hive/metastore/HiveMetaStore.java | 29 +
.../hive/metastore/HiveMetaStoreClient.java | 6 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 3 +
.../hadoop/hive/metastore/ObjectStore.java | 46 +-
.../apache/hadoop/hive/metastore/RawStore.java | 2 +
.../hadoop/hive/metastore/hbase/HBaseStore.java | 6 +
.../DummyRawStoreControlledCommit.java | 6 +
.../DummyRawStoreForJdoConnection.java | 6 +
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 21 +-
.../hadoop/hive/ql/hooks/WriteEntity.java | 3 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 9 +
.../hive/ql/parse/DDLSemanticAnalyzer.java | 13 +-
.../apache/hadoop/hive/ql/parse/HiveParser.g | 9 +
.../hive/ql/parse/SemanticAnalyzerFactory.java | 2 +
.../hadoop/hive/ql/plan/AlterTableDesc.java | 25 +-
.../hadoop/hive/ql/plan/HiveOperation.java | 2 +
.../clientnegative/drop_invalid_constraint1.q | 3 +
.../clientnegative/drop_invalid_constraint2.q | 2 +
.../clientnegative/drop_invalid_constraint3.q | 2 +
.../clientnegative/drop_invalid_constraint4.q | 3 +
.../clientpositive/create_with_constraints.q | 12 +
.../drop_invalid_constraint1.q.out | 15 +
.../drop_invalid_constraint2.q.out | 11 +
.../drop_invalid_constraint3.q.out | 11 +
.../drop_invalid_constraint4.q.out | 19 +
.../create_with_constraints.q.out | 68 +
service/src/gen/thrift/gen-py/__init__.py | 0
42 files changed, 5925 insertions(+), 2540 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index acebf7a..c8d78b6 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -487,6 +487,11 @@ struct ForeignKeysResponse {
1: required list<SQLForeignKey> foreignKeys
}
+struct DropConstraintRequest {
+ 1: required string dbname,
+ 2: required string tablename,
+ 3: required string constraintname
+}
// Return type for get_partitions_by_expr
struct PartitionsByExprResult {
@@ -993,6 +998,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
throws (1:AlreadyExistsException o1,
2:InvalidObjectException o2, 3:MetaException o3,
4:NoSuchObjectException o4)
+ void drop_constraint(1:DropConstraintRequest req)
+ throws(1:NoSuchObjectException o1, 2:MetaException o3)
+
// drops the table and all the partitions associated with it if the table has partitions
// delete data (including partitions) if deleteData is set to true
void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
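On the Java side, the file list above shows the new Thrift call surfaced through HiveMetaStoreClient and IMetaStoreClient, and a DROP CONSTRAINT form added to HiveParser.g. A hedged sketch of invoking it programmatically, assuming the client wrapper mirrors the Thrift name as dropConstraint(db, table, constraint); the database, table, and constraint names below are hypothetical:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

public class DropConstraintDemo {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // Per the Thrift signature above, this raises NoSuchObjectException
      // if the named constraint does not exist on default.orders.
      client.dropConstraint("default", "orders", "pk_orders");
    } finally {
      client.close();
    }
  }
}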
[20/20] hive git commit: Merge branch 'master' into llap
Posted by jd...@apache.org.
Merge branch 'master' into llap
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/763e6969
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/763e6969
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/763e6969
Branch: refs/heads/llap
Commit: 763e6969d0e78806db0fc875830395c783f18b0c
Parents: 03ee048 0927187
Author: Jason Dere <jd...@hortonworks.com>
Authored: Thu May 5 13:03:53 2016 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Thu May 5 13:03:53 2016 -0700
----------------------------------------------------------------------
.../src/main/resources/hive-log4j2.properties | 6 +-
.../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 108 +-
.../main/java/org/apache/hive/hplsql/Exec.java | 67 +-
.../java/org/apache/hive/hplsql/Expression.java | 31 +-
.../java/org/apache/hive/hplsql/Select.java | 31 +-
.../java/org/apache/hive/hplsql/Signal.java | 2 +-
.../main/java/org/apache/hive/hplsql/Stmt.java | 154 +-
hplsql/src/main/resources/hplsql-site.xml | 2 -
.../org/apache/hive/hplsql/TestHplsqlLocal.java | 5 +
.../apache/hive/hplsql/TestHplsqlOffline.java | 20 +
hplsql/src/test/queries/local/if3_bteq.sql | 3 +
.../test/queries/offline/create_table_td.sql | 45 +
hplsql/src/test/queries/offline/delete_all.sql | 1 +
hplsql/src/test/queries/offline/select.sql | 42 +
.../test/queries/offline/select_teradata.sql | 12 +
hplsql/src/test/results/db/select_into.out.txt | 3 +-
hplsql/src/test/results/db/select_into2.out.txt | 4 +-
hplsql/src/test/results/local/if3_bteq.out.txt | 3 +
hplsql/src/test/results/local/lang.out.txt | 10 +-
.../results/offline/create_table_mssql.out.txt | 39 +-
.../results/offline/create_table_mssql2.out.txt | 13 +-
.../results/offline/create_table_mysql.out.txt | 5 +-
.../results/offline/create_table_ora.out.txt | 65 +-
.../results/offline/create_table_ora2.out.txt | 9 +-
.../results/offline/create_table_pg.out.txt | 7 +-
.../results/offline/create_table_td.out.txt | 31 +
.../src/test/results/offline/delete_all.out.txt | 2 +
hplsql/src/test/results/offline/select.out.txt | 34 +
.../src/test/results/offline/select_db2.out.txt | 3 +-
.../results/offline/select_teradata.out.txt | 10 +
.../hadoop/hive/llap/cache/BuddyAllocator.java | 43 +-
.../hive/llap/daemon/impl/LlapDaemon.java | 5 +-
metastore/if/hive_metastore.thrift | 8 +
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2431 ++++++++++--------
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 133 +
.../ThriftHiveMetastore_server.skeleton.cpp | 5 +
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 2180 ++++++++--------
.../gen/thrift/gen-cpp/hive_metastore_types.h | 52 +
.../metastore/api/DropConstraintRequest.java | 591 +++++
.../hive/metastore/api/ThriftHiveMetastore.java | 1966 ++++++++++----
.../gen-php/metastore/ThriftHiveMetastore.php | 242 ++
.../src/gen/thrift/gen-php/metastore/Types.php | 121 +
.../hive_metastore/ThriftHiveMetastore-remote | 7 +
.../hive_metastore/ThriftHiveMetastore.py | 212 ++
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 97 +
.../gen/thrift/gen-rb/hive_metastore_types.rb | 23 +
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 63 +
.../hadoop/hive/metastore/HiveMetaStore.java | 29 +
.../hive/metastore/HiveMetaStoreClient.java | 6 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 3 +
.../hadoop/hive/metastore/ObjectStore.java | 46 +-
.../apache/hadoop/hive/metastore/RawStore.java | 2 +
.../hive/metastore/RetryingMetaStoreClient.java | 17 +-
.../hadoop/hive/metastore/hbase/HBaseStore.java | 6 +
.../DummyRawStoreControlledCommit.java | 6 +
.../DummyRawStoreForJdoConnection.java | 6 +
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 21 +-
.../persistence/HybridHashTableContainer.java | 60 +-
.../ql/exec/persistence/KeyValueContainer.java | 4 +
.../ql/exec/vector/VectorizationContext.java | 7 +
.../hadoop/hive/ql/hooks/WriteEntity.java | 3 +-
.../serde/AbstractParquetMapInspector.java | 4 +-
.../serde/ParquetHiveArrayInspector.java | 4 +-
.../ql/io/parquet/write/DataWritableWriter.java | 67 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 12 +-
.../rules/HiveReduceExpressionsRule.java | 125 +
.../rules/HiveSortLimitPullUpConstantsRule.java | 157 ++
.../rules/HiveUnionPullUpConstantsRule.java | 133 +
.../hadoop/hive/ql/parse/CalcitePlanner.java | 5 +
.../hive/ql/parse/DDLSemanticAnalyzer.java | 13 +-
.../apache/hadoop/hive/ql/parse/HiveParser.g | 9 +
.../hive/ql/parse/SemanticAnalyzerFactory.java | 2 +
.../hadoop/hive/ql/plan/AlterTableDesc.java | 25 +-
.../hadoop/hive/ql/plan/HiveOperation.java | 2 +
.../ql/io/parquet/TestDataWritableWriter.java | 29 +
.../serde/TestAbstractParquetMapInspector.java | 4 +-
.../serde/TestParquetHiveArrayInspector.java | 4 +-
.../clientnegative/drop_invalid_constraint1.q | 3 +
.../clientnegative/drop_invalid_constraint2.q | 2 +
.../clientnegative/drop_invalid_constraint3.q | 2 +
.../clientnegative/drop_invalid_constraint4.q | 3 +
.../test/queries/clientpositive/cbo_input26.q | 54 +
.../queries/clientpositive/cbo_union_view.q | 19 +
.../clientpositive/create_with_constraints.q | 12 +
.../parquet_array_map_emptynullvals.q | 20 +
.../vector_non_constant_in_expr.q | 4 +
.../drop_invalid_constraint1.q.out | 15 +
.../drop_invalid_constraint2.q.out | 11 +
.../drop_invalid_constraint3.q.out | 11 +
.../drop_invalid_constraint4.q.out | 19 +
.../results/clientpositive/cbo_input26.q.out | 596 +++++
.../results/clientpositive/cbo_union_view.q.out | 228 ++
.../create_with_constraints.q.out | 68 +
.../results/clientpositive/groupby_ppd.q.out | 28 +-
.../clientpositive/load_dyn_part14.q.out | 6 +-
.../parquet_array_map_emptynullvals.q.out | 87 +
.../results/clientpositive/perf/query66.q.out | 328 +--
.../results/clientpositive/perf/query75.q.out | 692 ++---
.../clientpositive/spark/load_dyn_part14.q.out | 6 +-
.../clientpositive/spark/union_remove_25.q.out | 16 +-
.../clientpositive/spark/union_view.q.out | 60 +-
.../clientpositive/union_remove_25.q.out | 20 +-
.../results/clientpositive/union_view.q.out | 60 +-
.../vector_non_constant_in_expr.q.out | 36 +
service/src/gen/thrift/gen-py/__init__.py | 0
105 files changed, 8771 insertions(+), 3392 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/763e6969/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------
[13/20] hive git commit: HIVE-13671: Add PerfLogger to log4j2.properties logger (Prasanth Jayachandran reviewed by Sergey Shelukhin)
Posted by jd...@apache.org.
HIVE-13671: Add PerfLogger to log4j2.properties logger (Prasanth Jayachandran reviewed by Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a88050bd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a88050bd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a88050bd
Branch: refs/heads/llap
Commit: a88050bd9ae1f2cfec87a54e773a83cdb3de325f
Parents: f68b5db
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Wed May 4 21:30:45 2016 -0500
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Wed May 4 21:30:45 2016 -0500
----------------------------------------------------------------------
common/src/main/resources/hive-log4j2.properties | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/a88050bd/common/src/main/resources/hive-log4j2.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/hive-log4j2.properties b/common/src/main/resources/hive-log4j2.properties
index 12cd9ac..cf0369a 100644
--- a/common/src/main/resources/hive-log4j2.properties
+++ b/common/src/main/resources/hive-log4j2.properties
@@ -23,6 +23,7 @@ property.hive.log.level = INFO
property.hive.root.logger = DRFA
property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
property.hive.log.file = hive.log
+property.hive.perflogger.log.level = INFO
# list of all appenders
appenders = console, DRFA
@@ -50,7 +51,7 @@ appender.DRFA.strategy.type = DefaultRolloverStrategy
appender.DRFA.strategy.max = 30
# list of all loggers
-loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, PerfLogger
logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
logger.NIOServerCnxn.level = WARN
@@ -67,6 +68,9 @@ logger.Datastore.level = ERROR
logger.JPOX.name = JPOX
logger.JPOX.level = ERROR
+logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
+logger.PerfLogger.level = ${sys:hive.perflogger.log.level}
+
# root logger
rootLogger.level = ${sys:hive.log.level}
rootLogger.appenderRefs = root
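Because the new logger level is resolved through a ${sys:...} lookup, it can be overridden per process without editing the properties file, for example by passing -Dhive.perflogger.log.level=DEBUG to the JVM. A small sketch under that assumption (the property must be set before Log4j2 initializes):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PerfLoggerLevelDemo {
  public static void main(String[] args) {
    // Equivalent to -Dhive.perflogger.log.level=DEBUG on the command line;
    // only effective if set before the logging framework is configured.
    System.setProperty("hive.perflogger.log.level", "DEBUG");
    Logger perfLog = LoggerFactory.getLogger("org.apache.hadoop.hive.ql.log.PerfLogger");
    perfLog.debug("visible once the PerfLogger logger is at DEBUG");
  }
}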
[02/20] hive git commit: HIVE-13638: CBO rule to pull up constants through Sort/Limit (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
HIVE-13638: CBO rule to pull up constants through Sort/Limit (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b04dc95f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b04dc95f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b04dc95f
Branch: refs/heads/llap
Commit: b04dc95f4fa7dda9d4806c45dbe52aed4b9f1a18
Parents: 2d33d09
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Sat Apr 30 11:49:47 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed May 4 18:57:30 2016 +0100
----------------------------------------------------------------------
.../rules/HiveReduceExpressionsRule.java | 125 ++++
.../rules/HiveSortLimitPullUpConstantsRule.java | 157 +++++
.../hadoop/hive/ql/parse/CalcitePlanner.java | 3 +
.../test/queries/clientpositive/cbo_input26.q | 54 ++
.../results/clientpositive/cbo_input26.q.out | 596 +++++++++++++++++++
.../clientpositive/load_dyn_part14.q.out | 6 +-
.../clientpositive/spark/load_dyn_part14.q.out | 6 +-
.../clientpositive/spark/union_remove_25.q.out | 60 +-
.../clientpositive/union_remove_25.q.out | 20 +-
9 files changed, 985 insertions(+), 42 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
index 9006f45..2fe9b75 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
@@ -396,6 +396,131 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
assert constExps.size() == addCasts.size();
}
+ /** Creates a map containing each (e, constant) pair that occurs within
+ * a predicate list.
+ *
+ * @param clazz Class of expression that is considered constant
+ * @param rexBuilder Rex builder
+ * @param predicates Predicate list
+ * @param <C> what to consider a constant: {@link RexLiteral} to use a narrow
+ * definition of constant, or {@link RexNode} to use
+ * {@link RexUtil#isConstant(RexNode)}
+ * @return Map from values to constants
+ */
+ public static <C extends RexNode> ImmutableMap<RexNode, C> predicateConstants(
+ Class<C> clazz, RexBuilder rexBuilder, RelOptPredicateList predicates) {
+ // We cannot use an ImmutableMap.Builder here. If there are multiple entries
+ // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't
+ // matter which we take, so the latter will replace the former.
+ // The basic idea is to find all the pairs of RexNode = RexLiteral
+ // (1) If 'predicates' contain a non-EQUALS, we bail out.
+ // (2) It is OK if a RexNode is equal to the same RexLiteral several times,
+ // (e.g. "WHERE deptno = 1 AND deptno = 1")
+ // (3) It will return false if there are inconsistent constraints (e.g.
+ // "WHERE deptno = 1 AND deptno = 2")
+ final Map<RexNode, C> map = new HashMap<>();
+ final Set<RexNode> excludeSet = new HashSet<>();
+ for (RexNode predicate : predicates.pulledUpPredicates) {
+ gatherConstraints(clazz, predicate, map, excludeSet, rexBuilder);
+ }
+ final ImmutableMap.Builder<RexNode, C> builder =
+ ImmutableMap.builder();
+ for (Map.Entry<RexNode, C> entry : map.entrySet()) {
+ RexNode rexNode = entry.getKey();
+ if (!overlap(rexNode, excludeSet)) {
+ builder.put(rexNode, entry.getValue());
+ }
+ }
+ return builder.build();
+ }
+
+ private static <C extends RexNode> void gatherConstraints(Class<C> clazz,
+ RexNode predicate, Map<RexNode, C> map, Set<RexNode> excludeSet,
+ RexBuilder rexBuilder) {
+ if (predicate.getKind() != SqlKind.EQUALS) {
+ decompose(excludeSet, predicate);
+ return;
+ }
+ final List<RexNode> operands = ((RexCall) predicate).getOperands();
+ if (operands.size() != 2) {
+ decompose(excludeSet, predicate);
+ return;
+ }
+ // if it reaches here, we have rexNode equals rexNode
+ final RexNode left = operands.get(0);
+ final RexNode right = operands.get(1);
+ // note that literals are immutable too and they can only be compared through
+ // values.
+ gatherConstraint(clazz, left, right, map, excludeSet, rexBuilder);
+ gatherConstraint(clazz, right, left, map, excludeSet, rexBuilder);
+ }
+
+ /** Returns whether a value of {@code type2} can be assigned to a variable
+ * of {@code type1}.
+ *
+ * <p>For example:
+ * <ul>
+ * <li>{@code canAssignFrom(BIGINT, TINYINT)} returns {@code true}</li>
+ * <li>{@code canAssignFrom(TINYINT, BIGINT)} returns {@code false}</li>
+ * <li>{@code canAssignFrom(BIGINT, VARCHAR)} returns {@code false}</li>
+ * </ul>
+ */
+ private static boolean canAssignFrom(RelDataType type1, RelDataType type2) {
+ final SqlTypeName name1 = type1.getSqlTypeName();
+ final SqlTypeName name2 = type2.getSqlTypeName();
+ if (name1.getFamily() == name2.getFamily()) {
+ switch (name1.getFamily()) {
+ case NUMERIC:
+ return name1.compareTo(name2) >= 0;
+ default:
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private static <C extends RexNode> void gatherConstraint(Class<C> clazz,
+ RexNode left, RexNode right, Map<RexNode, C> map, Set<RexNode> excludeSet,
+ RexBuilder rexBuilder) {
+ if (!clazz.isInstance(right)) {
+ return;
+ }
+ if (!RexUtil.isConstant(right)) {
+ return;
+ }
+ C constant = clazz.cast(right);
+ if (excludeSet.contains(left)) {
+ return;
+ }
+ final C existedValue = map.get(left);
+ if (existedValue == null) {
+ switch (left.getKind()) {
+ case CAST:
+ // Convert "CAST(c) = literal" to "c = literal", as long as it is a
+ // widening cast.
+ final RexNode operand = ((RexCall) left).getOperands().get(0);
+ if (canAssignFrom(left.getType(), operand.getType())) {
+ final RexNode castRight =
+ rexBuilder.makeCast(operand.getType(), constant);
+ if (castRight instanceof RexLiteral) {
+ left = operand;
+ constant = clazz.cast(castRight);
+ }
+ }
+ }
+ map.put(left, constant);
+ } else {
+ if (existedValue instanceof RexLiteral
+ && constant instanceof RexLiteral
+ && !((RexLiteral) existedValue).getValue()
+ .equals(((RexLiteral) constant).getValue())) {
+ // we found conflicting values, e.g. left = 10 and left = 20
+ map.remove(left);
+ excludeSet.add(left);
+ }
+ }
+ }
+
protected static ImmutableMap<RexNode, RexLiteral> predicateConstants(
RelOptPredicateList predicates) {
// We cannot use an ImmutableMap.Builder here. If there are multiple entries
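A minimal, self-contained analogue of the constraint-gathering logic above,
using plain strings in place of RexNode/RexLiteral: duplicate constraints are
harmless, while conflicting ones evict the expression into the exclude set.
The class and method names below are invented for this sketch and are not
part of the Hive or Calcite API.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ConstraintGatherSketch {
  // Record "expr = constant"; on a conflicting constant, drop the expression
  // from the map and exclude it from further consideration.
  static void gather(String expr, String constant,
      Map<String, String> map, Set<String> excludeSet) {
    if (excludeSet.contains(expr)) {
      return;                        // already known to be inconsistent
    }
    String existing = map.get(expr);
    if (existing == null) {
      map.put(expr, constant);       // first constraint seen for this expression
    } else if (!existing.equals(constant)) {
      map.remove(expr);              // conflict, e.g. deptno = 1 AND deptno = 2
      excludeSet.add(expr);
    }
  }

  public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();
    Set<String> exclude = new HashSet<>();
    gather("deptno", "1", map, exclude);  // deptno -> 1
    gather("deptno", "1", map, exclude);  // duplicate, still deptno -> 1
    gather("deptno", "2", map, exclude);  // conflict: deptno removed and excluded
    System.out.println(map);              // {}
    System.out.println(exclude);          // [deptno]
  }
}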
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
new file mode 100644
index 0000000..d14b0ba
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.calcite.plan.RelOptPredicateList;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelCollations;
+import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.calcite.tools.RelBuilderFactory;
+import org.apache.calcite.util.Pair;
+import org.apache.calcite.util.mapping.Mappings;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Planner rule that pulls up constant keys through a SortLimit operator.
+ *
+ * This rule is only applied to SortLimit operators that are not the root
+ * of the plan tree, since the interaction of this rule with the AST
+ * conversion may otherwise prevent some optimizations, e.g.
+ * SimpleFetchOptimizer, from kicking in. Skipping the root operator
+ * has no performance impact on the resulting plans.
+ */
+public class HiveSortLimitPullUpConstantsRule extends RelOptRule {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(HiveSortLimitPullUpConstantsRule.class);
+
+
+ public static final HiveSortLimitPullUpConstantsRule INSTANCE =
+ new HiveSortLimitPullUpConstantsRule(HiveSortLimit.class,
+ HiveRelFactories.HIVE_BUILDER);
+
+ private HiveSortLimitPullUpConstantsRule(
+ Class<? extends Sort> sortClass,
+ RelBuilderFactory relBuilderFactory) {
+ super(operand(RelNode.class,
+ operand(sortClass, any())),
+ relBuilderFactory, null);
+ }
+
+ @Override
+ public void onMatch(RelOptRuleCall call) {
+ final RelNode parent = call.rel(0);
+ final Sort sort = call.rel(1);
+
+ final int count = sort.getInput().getRowType().getFieldCount();
+ if (count == 1) {
+ // No room for optimization since we cannot convert to an empty
+ // Project operator.
+ return;
+ }
+
+ final RexBuilder rexBuilder = sort.getCluster().getRexBuilder();
+ final RelMetadataQuery mq = RelMetadataQuery.instance();
+ final RelOptPredicateList predicates = mq.getPulledUpPredicates(sort.getInput());
+ if (predicates == null) {
+ return;
+ }
+
+ Map<RexNode, RexNode> constants = HiveReduceExpressionsRule.predicateConstants(
+ RexNode.class, rexBuilder, predicates);
+
+ // None of the expressions are constant. Nothing to do.
+ if (constants.isEmpty()) {
+ return;
+ }
+
+ if (count == constants.size()) {
+ // At least one item must remain in the Project operator.
+ final Map<RexNode, RexNode> map = new HashMap<>(constants);
+ map.remove(map.keySet().iterator().next());
+ constants = map;
+ }
+
+ // Create expressions for Project operators before and after the Sort
+ List<RelDataTypeField> fields = sort.getInput().getRowType().getFieldList();
+ List<Pair<RexNode, String>> newChildExprs = new ArrayList<>();
+ List<RexNode> topChildExprs = new ArrayList<>();
+ List<String> topChildExprsFields = new ArrayList<>();
+ for (int i = 0; i < count ; i++) {
+ RexNode expr = rexBuilder.makeInputRef(sort.getInput(), i);
+ RelDataTypeField field = fields.get(i);
+ if (constants.containsKey(expr)) {
+ topChildExprs.add(constants.get(expr));
+ topChildExprsFields.add(field.getName());
+ } else {
+ newChildExprs.add(Pair.<RexNode,String>of(expr, field.getName()));
+ topChildExprs.add(expr);
+ topChildExprsFields.add(field.getName());
+ }
+ }
+
+ // Update field collations
+ final Mappings.TargetMapping mapping =
+ RelOptUtil.permutation(Pair.left(newChildExprs), sort.getInput().getRowType()).inverse();
+ List<RelFieldCollation> fieldCollations = new ArrayList<>();
+ for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
+ final int target = mapping.getTargetOpt(fc.getFieldIndex());
+ if (target < 0) {
+ // It is a constant; we can ignore it
+ continue;
+ }
+ fieldCollations.add(fc.copy(target));
+ }
+
+ // Update top Project positions
+ topChildExprs = ImmutableList.copyOf(RexUtil.apply(mapping, topChildExprs));
+
+ // Create new Project-Sort-Project sequence
+ final RelBuilder relBuilder = call.builder();
+ relBuilder.push(sort.getInput());
+ relBuilder.project(Pair.left(newChildExprs), Pair.right(newChildExprs));
+ final ImmutableList<RexNode> sortFields =
+ relBuilder.fields(RelCollations.of(fieldCollations));
+ relBuilder.sortLimit(sort.offset == null ? -1 : RexLiteral.intValue(sort.offset),
+ sort.fetch == null ? -1 : RexLiteral.intValue(sort.fetch), sortFields);
+ relBuilder.project(topChildExprs, topChildExprsFields);
+
+ call.transformTo(parent.copy(parent.getTraitSet(), ImmutableList.of(relBuilder.build())));
+ }
+
+}
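A minimal in-memory analogue of the Project-Sort-Project rewrite performed by
onMatch above: constant columns are projected out below the sort, the sort key
is remapped onto the reduced row, and a top projection re-attaches the
constants at their original positions. All names here are invented for the
sketch; the real rule operates on RelNode plans, not lists.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class SortPullUpSketch {

  static List<List<String>> sortWithConstantsPulledUp(
      List<List<String>> rows, Map<Integer, String> constants, int sortCol) {
    // Bottom Project: strip the constant columns before sorting.
    List<List<String>> stripped = new ArrayList<>();
    for (List<String> row : rows) {
      List<String> reduced = new ArrayList<>();
      for (int i = 0; i < row.size(); i++) {
        if (!constants.containsKey(i)) {
          reduced.add(row.get(i));
        }
      }
      stripped.add(reduced);
    }
    // Remap the sort key: constant columns in front of it no longer exist.
    int remapped = sortCol;
    for (int i : constants.keySet()) {
      if (i < sortCol) {
        remapped--;
      }
    }
    final int key = remapped;
    stripped.sort(Comparator.comparing((List<String> r) -> r.get(key)));
    // Top Project: re-insert the constants at their original positions.
    List<List<String>> result = new ArrayList<>();
    for (List<String> row : stripped) {
      List<String> full = new ArrayList<>(row);
      for (Map.Entry<Integer, String> e : new TreeMap<>(constants).entrySet()) {
        full.add(e.getKey(), e.getValue());
      }
      result.add(full);
    }
    return result;
  }

  public static void main(String[] args) {
    List<List<String>> rows = List.of(
        List.of("10", "val_10", "2008-04-08", "11"),
        List.of("0", "val_0", "2008-04-08", "11"));
    // ds and hr (columns 2 and 3) are constant; sort on key (column 0).
    System.out.println(sortWithConstantsPulledUp(
        rows, Map.of(2, "2008-04-08", 3, "11"), 0));
    // [[0, val_0, 2008-04-08, 11], [10, val_10, 2008-04-08, 11]]
  }
}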
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 8e00e0b..377573b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -153,6 +153,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinCommuteRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinProjectTransposeRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinPushTransitivePredicatesRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinToMultiJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortLimitPullUpConstantsRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePartitionPruneRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePointLookupOptimizerRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePreFilteringRule;
@@ -1163,6 +1164,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
rules.add(HiveJoinAddNotNullRule.INSTANCE_SEMIJOIN);
rules.add(HiveJoinPushTransitivePredicatesRule.INSTANCE_JOIN);
rules.add(HiveJoinPushTransitivePredicatesRule.INSTANCE_SEMIJOIN);
+ rules.add(HiveSortMergeRule.INSTANCE);
+ rules.add(HiveSortLimitPullUpConstantsRule.INSTANCE);
perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
basePlan = hepPlan(basePlan, true, mdProvider, executorProvider, HepMatchOrder.BOTTOM_UP,
rules.toArray(new RelOptRule[rules.size()]));
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/test/queries/clientpositive/cbo_input26.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_input26.q b/ql/src/test/queries/clientpositive/cbo_input26.q
new file mode 100644
index 0000000..40050f9
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_input26.q
@@ -0,0 +1,54 @@
+set hive.mapred.mode=nonstrict;
+set hive.optimize.constant.propagation=false;
+
+explain
+select * from (
+ select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+select * from (
+ select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+explain
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+explain
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.hr,a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.hr,a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+explain
+select * from (
+ select * from (select a.key, a.ds, a.value from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.ds limit 5)pa
+ union all
+ select * from (select b.key, b.ds, b.value from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
+
+select * from (
+ select * from (select a.key, a.ds, a.value from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.ds limit 5)pa
+ union all
+ select * from (select b.key, b.ds, b.value from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq;
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/test/results/clientpositive/cbo_input26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_input26.q.out b/ql/src/test/results/clientpositive/cbo_input26.q.out
new file mode 100644
index 0000000..5c4c771
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_input26.q.out
@@ -0,0 +1,596 @@
+PREHOOK: query: explain
+select * from (
+ select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from (
+ select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1, Stage-3
+ Stage-3 is a root stage
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col1 (type: string)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((ds = '2008-04-08') and (hr = '14')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '14' (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from (
+ select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (
+ select * from (select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+10 val_10 2008-04-08 11
+100 val_100 2008-04-08 11
+PREHOOK: query: explain
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1, Stage-3
+ Stage-3 is a root stage
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: '2008-04-08' (type: string), _col0 (type: string), '11' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((ds = '2008-04-08') and (hr = '14')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col0 (type: string)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: '2008-04-08' (type: string), _col0 (type: string), '14' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+2008-04-08 0 11
+2008-04-08 0 11
+2008-04-08 0 11
+2008-04-08 10 11
+2008-04-08 100 11
+PREHOOK: query: explain
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.hr,a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.hr,a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1, Stage-3
+ Stage-3 is a root stage
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: '2008-04-08' (type: string), _col0 (type: string), '11' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((ds = '2008-04-08') and (hr = '14')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col0 (type: string)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: '2008-04-08' (type: string), _col0 (type: string), '14' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.hr,a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (
+ select * from (select a.ds, a.key, a.hr from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.hr,a.key limit 5)pa
+ union all
+ select * from (select b.ds, b.key, b.hr from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+2008-04-08 0 11
+2008-04-08 0 11
+2008-04-08 0 11
+2008-04-08 10 11
+2008-04-08 100 11
+PREHOOK: query: explain
+select * from (
+ select * from (select a.key, a.ds, a.value from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.ds limit 5)pa
+ union all
+ select * from (select b.key, b.ds, b.value from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from (
+ select * from (select a.key, a.ds, a.value from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.ds limit 5)pa
+ union all
+ select * from (select b.key, b.ds, b.value from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1, Stage-3
+ Stage-3 is a root stage
+ Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), '2008-04-08' (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-2
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TableScan
+ Union
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Filter Operator
+ predicate: ((ds = '2008-04-08') and (hr = '14')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Limit
+ Number of rows: 5
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), '2008-04-08' (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from (
+ select * from (select a.key, a.ds, a.value from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.ds limit 5)pa
+ union all
+ select * from (select b.key, b.ds, b.value from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (
+ select * from (select a.key, a.ds, a.value from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.ds limit 5)pa
+ union all
+ select * from (select b.key, b.ds, b.value from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5)pb
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+165 2008-04-08 val_165
+27 2008-04-08 val_27
+311 2008-04-08 val_311
+86 2008-04-08 val_86
+238 2008-04-08 val_238
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/test/results/clientpositive/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
index 53e9df3..57c4287 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
@@ -74,13 +74,13 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
Limit
Number of rows: 2
- Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
TopN Hash Memory Usage: 0.1
Reduce Operator Tree:
Limit
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
index 84d99c3..1940561 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
@@ -73,13 +73,13 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
Limit
Number of rows: 2
- Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
TopN Hash Memory Usage: 0.1
Reducer 2
Reduce Operator Tree:
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
index 253bf8f..190bea5 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
@@ -438,7 +438,7 @@ STAGE PLANS:
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string), hr (type: string)
- outputColumnNames: _col0, _col1, _col3
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
@@ -447,49 +447,57 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
Reducer 2
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col3 (type: string)
- outputColumnNames: _col0, _col1, _col3
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
+ expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ outputColumnNames: _col0, _col1, _col3
+ Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.outputtbl3
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.outputtbl3
Reducer 4
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col3 (type: string)
- outputColumnNames: _col0, _col1, _col3
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
+ expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ outputColumnNames: _col0, _col1, _col3
+ Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.outputtbl3
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.outputtbl3
Stage: Stage-0
Move Operator
http://git-wip-us.apache.org/repos/asf/hive/blob/b04dc95f/ql/src/test/results/clientpositive/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out
index 54ddf56..3869735 100644
--- a/ql/src/test/results/clientpositive/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_25.q.out
@@ -461,7 +461,7 @@ STAGE PLANS:
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string), hr (type: string)
- outputColumnNames: _col0, _col1, _col3
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
@@ -470,17 +470,17 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col3 (type: string)
- outputColumnNames: _col0, _col1, _col3
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
+ expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col2 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -513,7 +513,7 @@ STAGE PLANS:
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string), hr (type: string)
- outputColumnNames: _col0, _col1, _col3
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
@@ -522,17 +522,17 @@ STAGE PLANS:
sort order:
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col3 (type: string)
- outputColumnNames: _col0, _col1, _col3
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Limit
Number of rows: 1000
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col3 (type: string)
+ expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), '2008-04-08' (type: string), _col2 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
File Output Operator
[03/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 4f0c8fd..0e7b745 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -167,6 +167,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
*/
public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys);
/**
+ * @param \metastore\DropConstraintRequest $req
+ * @throws \metastore\NoSuchObjectException
+ * @throws \metastore\MetaException
+ */
+ public function drop_constraint(\metastore\DropConstraintRequest $req);
+ /**
* @param string $dbname
* @param string $name
* @param bool $deleteData
@@ -2250,6 +2256,60 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
return;
}
+ public function drop_constraint(\metastore\DropConstraintRequest $req)
+ {
+ $this->send_drop_constraint($req);
+ $this->recv_drop_constraint();
+ }
+
+ public function send_drop_constraint(\metastore\DropConstraintRequest $req)
+ {
+ $args = new \metastore\ThriftHiveMetastore_drop_constraint_args();
+ $args->req = $req;
+ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+ if ($bin_accel)
+ {
+ thrift_protocol_write_binary($this->output_, 'drop_constraint', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+ }
+ else
+ {
+ $this->output_->writeMessageBegin('drop_constraint', TMessageType::CALL, $this->seqid_);
+ $args->write($this->output_);
+ $this->output_->writeMessageEnd();
+ $this->output_->getTransport()->flush();
+ }
+ }
+
+ public function recv_drop_constraint()
+ {
+ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+ if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_constraint_result', $this->input_->isStrictRead());
+ else
+ {
+ $rseqid = 0;
+ $fname = null;
+ $mtype = 0;
+
+ $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+ if ($mtype == TMessageType::EXCEPTION) {
+ $x = new TApplicationException();
+ $x->read($this->input_);
+ $this->input_->readMessageEnd();
+ throw $x;
+ }
+ $result = new \metastore\ThriftHiveMetastore_drop_constraint_result();
+ $result->read($this->input_);
+ $this->input_->readMessageEnd();
+ }
+ if ($result->o1 !== null) {
+ throw $result->o1;
+ }
+ if ($result->o3 !== null) {
+ throw $result->o3;
+ }
+ return;
+ }
+
public function drop_table($dbname, $name, $deleteData)
{
$this->send_drop_table($dbname, $name, $deleteData);
@@ -13889,6 +13949,188 @@ class ThriftHiveMetastore_create_table_with_constraints_result {
}
+class ThriftHiveMetastore_drop_constraint_args {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\DropConstraintRequest
+ */
+ public $req = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'req',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\DropConstraintRequest',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['req'])) {
+ $this->req = $vals['req'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_drop_constraint_args';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->req = new \metastore\DropConstraintRequest();
+ $xfer += $this->req->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_constraint_args');
+ if ($this->req !== null) {
+ if (!is_object($this->req)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1);
+ $xfer += $this->req->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class ThriftHiveMetastore_drop_constraint_result {
+ static $_TSPEC;
+
+ /**
+ * @var \metastore\NoSuchObjectException
+ */
+ public $o1 = null;
+ /**
+ * @var \metastore\MetaException
+ */
+ public $o3 = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'o1',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\NoSuchObjectException',
+ ),
+ 2 => array(
+ 'var' => 'o3',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\MetaException',
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['o1'])) {
+ $this->o1 = $vals['o1'];
+ }
+ if (isset($vals['o3'])) {
+ $this->o3 = $vals['o3'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'ThriftHiveMetastore_drop_constraint_result';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRUCT) {
+ $this->o1 = new \metastore\NoSuchObjectException();
+ $xfer += $this->o1->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRUCT) {
+ $this->o3 = new \metastore\MetaException();
+ $xfer += $this->o3->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_constraint_result');
+ if ($this->o1 !== null) {
+ $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+ $xfer += $this->o1->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->o3 !== null) {
+ $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 2);
+ $xfer += $this->o3->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class ThriftHiveMetastore_drop_table_args {
static $_TSPEC;
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index e2fa963..a8a7db9 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -9153,6 +9153,127 @@ class ForeignKeysResponse {
}
+class DropConstraintRequest {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $dbname = null;
+ /**
+ * @var string
+ */
+ public $tablename = null;
+ /**
+ * @var string
+ */
+ public $constraintname = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'dbname',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tablename',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'constraintname',
+ 'type' => TType::STRING,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['dbname'])) {
+ $this->dbname = $vals['dbname'];
+ }
+ if (isset($vals['tablename'])) {
+ $this->tablename = $vals['tablename'];
+ }
+ if (isset($vals['constraintname'])) {
+ $this->constraintname = $vals['constraintname'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'DropConstraintRequest';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->dbname);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tablename);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->constraintname);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('DropConstraintRequest');
+ if ($this->dbname !== null) {
+ $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1);
+ $xfer += $output->writeString($this->dbname);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tablename !== null) {
+ $xfer += $output->writeFieldBegin('tablename', TType::STRING, 2);
+ $xfer += $output->writeString($this->tablename);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->constraintname !== null) {
+ $xfer += $output->writeFieldBegin('constraintname', TType::STRING, 3);
+ $xfer += $output->writeString($this->constraintname);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class PartitionsByExprResult {
static $_TSPEC;
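For orientation: the PHP struct above mirrors the Thrift IDL, and the same three required string fields are generated for every language binding. As a minimal sketch, constructing the request from the generated Java binding looks like this (the database, table, and constraint names are placeholder values, not part of this diff):

    import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;

    // Minimal sketch; "default", "table1" and "pk1" are placeholder values.
    DropConstraintRequest req =
        new DropConstraintRequest("default", "table1", "pk1");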
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 3ec46f1..5323d9f 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -43,6 +43,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print(' void create_table(Table tbl)')
print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)')
print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys)')
+ print(' void drop_constraint(DropConstraintRequest req)')
print(' void drop_table(string dbname, string name, bool deleteData)')
print(' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
print(' get_tables(string db_name, string pattern)')
@@ -353,6 +354,12 @@ elif cmd == 'create_table_with_constraints':
sys.exit(1)
pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),))
+elif cmd == 'drop_constraint':
+ if len(args) != 1:
+ print('drop_constraint requires 1 args')
+ sys.exit(1)
+ pp.pprint(client.drop_constraint(eval(args[0]),))
+
elif cmd == 'drop_table':
if len(args) != 3:
print('drop_table requires 3 args')
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 119a5f1..bf8d383 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -165,6 +165,13 @@ class Iface(fb303.FacebookService.Iface):
"""
pass
+ def drop_constraint(self, req):
+ """
+ Parameters:
+ - req
+ """
+ pass
+
def drop_table(self, dbname, name, deleteData):
"""
Parameters:
@@ -1875,6 +1882,39 @@ class Client(fb303.FacebookService.Client, Iface):
raise result.o4
return
+ def drop_constraint(self, req):
+ """
+ Parameters:
+ - req
+ """
+ self.send_drop_constraint(req)
+ self.recv_drop_constraint()
+
+ def send_drop_constraint(self, req):
+ self._oprot.writeMessageBegin('drop_constraint', TMessageType.CALL, self._seqid)
+ args = drop_constraint_args()
+ args.req = req
+ args.write(self._oprot)
+ self._oprot.writeMessageEnd()
+ self._oprot.trans.flush()
+
+ def recv_drop_constraint(self):
+ iprot = self._iprot
+ (fname, mtype, rseqid) = iprot.readMessageBegin()
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ raise x
+ result = drop_constraint_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.o1 is not None:
+ raise result.o1
+ if result.o3 is not None:
+ raise result.o3
+ return
+
def drop_table(self, dbname, name, deleteData):
"""
Parameters:
@@ -6499,6 +6539,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
self._processMap["create_table"] = Processor.process_create_table
self._processMap["create_table_with_environment_context"] = Processor.process_create_table_with_environment_context
self._processMap["create_table_with_constraints"] = Processor.process_create_table_with_constraints
+ self._processMap["drop_constraint"] = Processor.process_drop_constraint
self._processMap["drop_table"] = Processor.process_drop_table
self._processMap["drop_table_with_environment_context"] = Processor.process_drop_table_with_environment_context
self._processMap["get_tables"] = Processor.process_get_tables
@@ -7141,6 +7182,31 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_drop_constraint(self, seqid, iprot, oprot):
+ args = drop_constraint_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = drop_constraint_result()
+ try:
+ self._handler.drop_constraint(args.req)
+ msg_type = TMessageType.REPLY
+ except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+ raise
+ except NoSuchObjectException as o1:
+ msg_type = TMessageType.REPLY
+ result.o1 = o1
+ except MetaException as o3:
+ msg_type = TMessageType.REPLY
+ result.o3 = o3
+ except Exception as ex:
+ msg_type = TMessageType.EXCEPTION
+ logging.exception(ex)
+ result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+ oprot.writeMessageBegin("drop_constraint", msg_type, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_drop_table(self, seqid, iprot, oprot):
args = drop_table_args()
args.read(iprot)
@@ -13467,6 +13533,152 @@ class create_table_with_constraints_result:
def __ne__(self, other):
return not (self == other)
+class drop_constraint_args:
+ """
+ Attributes:
+ - req
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'req', (DropConstraintRequest, DropConstraintRequest.thrift_spec), None, ), # 1
+ )
+
+ def __init__(self, req=None,):
+ self.req = req
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.req = DropConstraintRequest()
+ self.req.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('drop_constraint_args')
+ if self.req is not None:
+ oprot.writeFieldBegin('req', TType.STRUCT, 1)
+ self.req.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.req)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class drop_constraint_result:
+ """
+ Attributes:
+ - o1
+ - o3
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 2
+ )
+
+ def __init__(self, o1=None, o3=None,):
+ self.o1 = o1
+ self.o3 = o3
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.o1 = NoSuchObjectException()
+ self.o1.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.o3 = MetaException()
+ self.o3.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('drop_constraint_result')
+ if self.o1 is not None:
+ oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+ self.o1.write(oprot)
+ oprot.writeFieldEnd()
+ if self.o3 is not None:
+ oprot.writeFieldBegin('o3', TType.STRUCT, 2)
+ self.o3.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.o1)
+ value = (value * 31) ^ hash(self.o3)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class drop_table_args:
"""
Attributes:
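The generated Python client above follows the usual blocking Thrift pattern: send_drop_constraint() writes the args struct, and recv_drop_constraint() reads the result and rethrows o1/o3 as exceptions. A hedged sketch of the equivalent call through the generated Java client (connection setup elided; exception mapping per the drop_constraint result struct above):

    import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    // Sketch only: assumes an already-opened client over a Thrift transport.
    void dropPk(ThriftHiveMetastore.Client client) throws TException {
      try {
        client.drop_constraint(new DropConstraintRequest("default", "table1", "pk1"));
      } catch (NoSuchObjectException e) {
        // o1: the constraint (or its table) does not exist
      } catch (MetaException e) {
        // o3: metastore-side failure
      }
    }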
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index f008788..8e0cb71 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -6264,6 +6264,103 @@ class ForeignKeysResponse:
def __ne__(self, other):
return not (self == other)
+class DropConstraintRequest:
+ """
+ Attributes:
+ - dbname
+ - tablename
+ - constraintname
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'dbname', None, None, ), # 1
+ (2, TType.STRING, 'tablename', None, None, ), # 2
+ (3, TType.STRING, 'constraintname', None, None, ), # 3
+ )
+
+ def __init__(self, dbname=None, tablename=None, constraintname=None,):
+ self.dbname = dbname
+ self.tablename = tablename
+ self.constraintname = constraintname
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.dbname = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tablename = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.constraintname = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('DropConstraintRequest')
+ if self.dbname is not None:
+ oprot.writeFieldBegin('dbname', TType.STRING, 1)
+ oprot.writeString(self.dbname)
+ oprot.writeFieldEnd()
+ if self.tablename is not None:
+ oprot.writeFieldBegin('tablename', TType.STRING, 2)
+ oprot.writeString(self.tablename)
+ oprot.writeFieldEnd()
+ if self.constraintname is not None:
+ oprot.writeFieldBegin('constraintname', TType.STRING, 3)
+ oprot.writeString(self.constraintname)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.dbname is None:
+ raise TProtocol.TProtocolException(message='Required field dbname is unset!')
+ if self.tablename is None:
+ raise TProtocol.TProtocolException(message='Required field tablename is unset!')
+ if self.constraintname is None:
+ raise TProtocol.TProtocolException(message='Required field constraintname is unset!')
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.dbname)
+ value = (value * 31) ^ hash(self.tablename)
+ value = (value * 31) ^ hash(self.constraintname)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class PartitionsByExprResult:
"""
Attributes:
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 4a24a19..4d3e49d 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -1390,6 +1390,29 @@ class ForeignKeysResponse
::Thrift::Struct.generate_accessors self
end
+class DropConstraintRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TABLENAME = 2
+ CONSTRAINTNAME = 3
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+ TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'},
+ CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablename is unset!') unless @tablename
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field constraintname is unset!') unless @constraintname
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
class PartitionsByExprResult
include ::Thrift::Struct, ::Thrift::Struct_Union
PARTITIONS = 1
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 99a764e..61d1832 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -336,6 +336,22 @@ module ThriftHiveMetastore
return
end
+ def drop_constraint(req)
+ send_drop_constraint(req)
+ recv_drop_constraint()
+ end
+
+ def send_drop_constraint(req)
+ send_message('drop_constraint', Drop_constraint_args, :req => req)
+ end
+
+ def recv_drop_constraint()
+ result = receive_message(Drop_constraint_result)
+ raise result.o1 unless result.o1.nil?
+ raise result.o3 unless result.o3.nil?
+ return
+ end
+
def drop_table(dbname, name, deleteData)
send_drop_table(dbname, name, deleteData)
recv_drop_table()
@@ -2704,6 +2720,19 @@ module ThriftHiveMetastore
write_result(result, oprot, 'create_table_with_constraints', seqid)
end
+ def process_drop_constraint(seqid, iprot, oprot)
+ args = read_args(iprot, Drop_constraint_args)
+ result = Drop_constraint_result.new()
+ begin
+ @handler.drop_constraint(args.req)
+ rescue ::NoSuchObjectException => o1
+ result.o1 = o1
+ rescue ::MetaException => o3
+ result.o3 = o3
+ end
+ write_result(result, oprot, 'drop_constraint', seqid)
+ end
+
def process_drop_table(seqid, iprot, oprot)
args = read_args(iprot, Drop_table_args)
result = Drop_table_result.new()
@@ -4954,6 +4983,40 @@ module ThriftHiveMetastore
::Thrift::Struct.generate_accessors self
end
+ class Drop_constraint_args
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ REQ = 1
+
+ FIELDS = {
+ REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::DropConstraintRequest}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
+ class Drop_constraint_result
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ O1 = 1
+ O3 = 2
+
+ FIELDS = {
+ O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+ O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+ end
+
class Drop_table_args
include ::Thrift::Struct, ::Thrift::Struct_Union
DBNAME = 1
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 4ada9c1..9a09e7a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1483,6 +1483,35 @@ public class HiveMetaStore extends ThriftHiveMetastore {
endFunction("create_table", success, ex, tbl.getTableName());
}
}
+
+ @Override
+ public void drop_constraint(DropConstraintRequest req)
+ throws MetaException, InvalidObjectException {
+ String dbName = req.getDbname();
+ String tableName = req.getTablename();
+ String constraintName = req.getConstraintname();
+ startFunction("drop_constraint", ": " + constraintName.toString());
+ boolean success = false;
+ Exception ex = null;
+ try {
+ getMS().dropConstraint(dbName, tableName, constraintName);
+ success = true;
+ } catch (NoSuchObjectException e) {
+ ex = e;
+ throw new InvalidObjectException(e.getMessage());
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("drop_constraint", success, ex, constraintName);
+ }
+ }
private boolean is_table_exists(RawStore ms, String dbname, String name)
throws MetaException {
return (ms.getTable(dbname, name) != null);
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 7d37d07..75fea5b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
@@ -765,6 +766,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
}
}
+ @Override
+ public void dropConstraint(String dbName, String tableName, String constraintName) throws
+ NoSuchObjectException, MetaException, TException {
+ client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
+ }
/**
* @param type
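Most callers would go through IMetaStoreClient rather than the raw Thrift client; a hedged usage sketch (table and constraint names are placeholders, and the constructor may throw MetaException):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    // Sketch: assumes a reachable metastore configured via HiveConf.
    IMetaStoreClient msc = new HiveMetaStoreClient(new HiveConf());
    msc.dropConstraint("default", "table1", "pk1"); // NoSuchObjectException if absent
    msc.close();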
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index c900a2d..3965475 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1570,4 +1570,7 @@ public interface IMetaStoreClient {
List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys)
throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException;
+ void dropConstraint(String dbName, String tableName, String constraintName) throws
+ MetaException, NoSuchObjectException, TException;
+
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index f651a13..5c49be9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1025,7 +1025,8 @@ public class ObjectStore implements RawStore, Configurable {
" table " + tableName + " record to delete");
}
- List<MConstraint> tabConstraints = listAllTableConstraints(dbName, tableName);
+ List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
+ dbName, tableName, null);
if (tabConstraints != null && tabConstraints.size() > 0) {
pm.deletePersistentAll(tabConstraints);
}
@@ -1043,19 +1044,27 @@ public class ObjectStore implements RawStore, Configurable {
return success;
}
- private List<MConstraint> listAllTableConstraints(String dbName, String tableName) {
+ private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName
+ (String dbName, String tableName, String constraintname) {
List<MConstraint> mConstraints = null;
List<String> constraintNames = new ArrayList<String>();
Query query = null;
try {
query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint where "
- + "(parentTable.tableName == ptblname && parentTable.database.name == pdbname) || "
- + "(childTable != null && childTable.tableName == ctblname && childTable.database.name == cdbname)");
+ + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname) || "
+ + "(childTable != null && childTable.tableName == ctblname && "
+ + "childTable.database.name == cdbname)) " + (constraintname != null ?
+ " && constraintName == constraintname" : ""));
query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
- + "java.lang.String ctblname, java.lang.String cdbname");
- Collection<?> constraintNamesColl = (Collection<?>) query.
- executeWithArray(tableName, dbName, tableName, dbName);
+ + "java.lang.String ctblname, java.lang.String cdbname" +
+ (constraintname != null ? ", java.lang.String constraintname" : ""));
+ Collection<?> constraintNamesColl =
+ constraintname != null ?
+ ((Collection<?>) query.
+ executeWithArray(tableName, dbName, tableName, dbName, constraintname)):
+ ((Collection<?>) query.
+ executeWithArray(tableName, dbName, tableName, dbName));
for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
String currName = (String) i.next();
constraintNames.add(currName);
@@ -8389,4 +8398,27 @@ public class ObjectStore implements RawStore, Configurable {
return foreignKeys;
}
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ boolean success = false;
+ try {
+ openTransaction();
+
+ List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
+ dbName, tableName, constraintName);
+ if (tabConstraints != null && tabConstraints.size() > 0) {
+ pm.deletePersistentAll(tabConstraints);
+ } else {
+ throw new NoSuchObjectException("The constraint: " + constraintName +
+ " does not exist for the associated table: " + dbName + "." + tableName);
+ }
+ success = commitTransaction();
+ } finally {
+ if (!success) {
+ rollbackTransaction();
+ }
+ }
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 100c396..06b8135 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -675,4 +675,6 @@ public interface RawStore extends Configurable {
void createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
List<SQLForeignKey> foreignKeys) throws InvalidObjectException, MetaException;
+
+ void dropConstraint(String dbName, String tableName, String constraintName) throws NoSuchObjectException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index d4e5da4..ec5b92c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -2615,4 +2615,10 @@ public class HBaseStore implements RawStore {
throws InvalidObjectException, MetaException {
// TODO Auto-generated method stub
}
+
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ // TODO Auto-generated method stub
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 86e7bea..63fcb28 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -844,4 +844,10 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
throws InvalidObjectException, MetaException {
// TODO Auto-generated method stub
}
+
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ // TODO Auto-generated method stub
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 5b32f00..386c70a 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -860,6 +860,12 @@ public class DummyRawStoreForJdoConnection implements RawStore {
throws InvalidObjectException, MetaException {
// TODO Auto-generated method stub
}
+
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ // TODO Auto-generated method stub
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 9887d77..c4d3bfb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -131,6 +131,7 @@ import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
@@ -356,7 +357,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
AlterTableDesc alterTbl = work.getAlterTblDesc();
if (alterTbl != null) {
- return alterTable(db, alterTbl);
+ if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT ) {
+ return dropConstraint(db, alterTbl);
+ } else {
+ return alterTable(db, alterTbl);
+ }
}
CreateViewDesc crtView = work.getCreateViewDesc();
@@ -3596,7 +3601,19 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return 0;
}
- /**
+ private int dropConstraint(Hive db, AlterTableDesc alterTbl)
+ throws SemanticException, HiveException {
+ try {
+ db.dropConstraint(Utilities.getDatabaseName(alterTbl.getOldName()),
+ Utilities.getTableName(alterTbl.getOldName()),
+ alterTbl.getConstraintName());
+ } catch (NoSuchObjectException e) {
+ throw new HiveException(e);
+ }
+ return 0;
+ }
+
+ /**
* Drop a given table or some partitions. DropTableDesc is currently used for both.
*
* @param db
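Note that alterTbl.getOldName() may be database-qualified (see the dbconstraint.table2 case in the tests below), so the handler splits it before calling into the metastore. A hedged illustration of that split; the return values shown are the presumed behavior for a qualified name, and these helpers may throw SemanticException:

    // Presumed behavior for a qualified name; not part of this diff.
    String db  = Utilities.getDatabaseName("dbconstraint.table2"); // -> "dbconstraint"
    String tbl = Utilities.getTableName("dbconstraint.table2");    // -> "table2"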
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index 515f8b2..2194a6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -203,7 +203,8 @@ public class WriteEntity extends Entity implements Serializable {
case ADDCOLS:
case RENAME:
case TRUNCATE:
- case MERGEFILES: return WriteType.DDL_EXCLUSIVE;
+ case MERGEFILES:
+ case DROPCONSTRAINT: return WriteType.DDL_EXCLUSIVE;
case ADDPARTITION:
case ADDSERDEPROPS:
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 26c458c..6862f70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -3593,4 +3593,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
+ public void dropConstraint(String dbName, String tableName, String constraintName)
+ throws HiveException, NoSuchObjectException {
+ try {
+ getMSC().dropConstraint(dbName, tableName, constraintName);
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
};
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 04e2a41..4a6617f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -26,7 +26,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -88,7 +87,6 @@ import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
-import org.apache.hadoop.hive.ql.plan.DDLDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
@@ -321,6 +319,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
analyzeAlterTableCompact(ast, tableName, partSpec);
} else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS){
analyzeAlterTableUpdateStats(ast, tableName, partSpec);
+ } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) {
+ analyzeAlterTableDropConstraint(ast, tableName);
}
break;
}
@@ -1740,6 +1740,15 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
}
+ private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName)
+ throws SemanticException {
+ String dropConstraintName = unescapeIdentifier(ast.getChild(0).getText());
+ AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, dropConstraintName);
+
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+ alterTblDesc), conf));
+ }
+
static HashMap<String, String> getProps(ASTNode prop) {
// Must be deterministic order map for consistent q-test output across Java versions
HashMap<String, String> mapProp = new LinkedHashMap<String, String>();
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 6531b03..2c66396 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -179,6 +179,7 @@ TOK_ALTERTABLE_SKEWED_LOCATION;
TOK_ALTERTABLE_BUCKETS;
TOK_ALTERTABLE_CLUSTER_SORT;
TOK_ALTERTABLE_COMPACT;
+TOK_ALTERTABLE_DROPCONSTRAINT;
TOK_ALTERINDEX_REBUILD;
TOK_ALTERINDEX_PROPERTIES;
TOK_MSCK;
@@ -1040,6 +1041,7 @@ alterTableStatementSuffix
| alterStatementSuffixSkewedby
| alterStatementSuffixExchangePartition
| alterStatementPartitionKeyType
+ | alterStatementSuffixDropConstraint
| partitionSpec? alterTblPartitionStatementSuffix -> alterTblPartitionStatementSuffix partitionSpec?
;
@@ -1129,6 +1131,13 @@ alterStatementSuffixAddCol
-> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList restrictOrCascade?)
;
+alterStatementSuffixDropConstraint
+@init { pushMsg("drop constraint statement", state); }
+@after { popMsg(state); }
+ : KW_DROP KW_CONSTRAINT cName=identifier
+ ->^(TOK_ALTERTABLE_DROPCONSTRAINT $cName)
+ ;
+
alterStatementSuffixRenameCol
@init { pushMsg("rename column name", state); }
@after { popMsg(state); }
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index fb8a33c..7b83381 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -62,6 +62,7 @@ public final class SemanticAnalyzerFactory {
commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES);
commandType.put(HiveParser.TOK_ALTERTABLE_DROPPROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES);
commandType.put(HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION, HiveOperation.ALTERTABLE_EXCHANGEPARTITION);
+ commandType.put(HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT, HiveOperation.ALTERTABLE_DROPCONSTRAINT);
commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES);
commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES);
commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS);
@@ -195,6 +196,7 @@ public final class SemanticAnalyzerFactory {
case HiveParser.TOK_ALTERTABLE_DROPPROPERTIES:
case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION:
case HiveParser.TOK_ALTERTABLE_SKEWED:
+ case HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT:
queryState.setCommandType(commandType.get(child.getType()));
return new DDLSemanticAnalyzer(queryState);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 4ba51ec..38d8d5a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -56,7 +56,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
DROPPARTITION("drop partition"), RENAMEPARTITION("rename partition"), ADDSKEWEDBY("add skew column"),
ALTERSKEWEDLOCATION("alter skew location"), ALTERBUCKETNUM("alter bucket number"),
ALTERPARTITION("alter partition"), COMPACT("compact"),
- TRUNCATE("truncate"), MERGEFILES("merge files");
+ TRUNCATE("truncate"), MERGEFILES("merge files"), DROPCONSTRAINT("drop constraint");
;
private final String name;
@@ -116,6 +116,7 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
boolean isTurnOffSorting = false;
boolean isCascade = false;
EnvironmentContext environmentContext;
+ String dropConstraintName;
public AlterTableDesc() {
}
@@ -263,6 +264,12 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
this.numberBuckets = numBuckets;
}
+ public AlterTableDesc(String tableName, String dropConstraintName) {
+ this.oldName = tableName;
+ this.dropConstraintName = dropConstraintName;
+ op = AlterTableTypes.DROPCONSTRAINT;
+ }
+
@Explain(displayName = "new columns", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
public List<String> getNewColsString() {
return Utilities.getFieldSchemaString(getNewCols());
@@ -408,6 +415,22 @@ public class AlterTableDesc extends DDLDesc implements Serializable {
}
/**
+ * @return the drop constraint name of the table
+ */
+ @Explain(displayName = "drop constraint name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+ public String getConstraintName() {
+ return dropConstraintName;
+ }
+
+ /**
+ * @param constraintName
+ * the dropConstraintName to set
+ */
+ public void setDropConstraintName(String constraintName) {
+ this.dropConstraintName = constraintName;
+ }
+
+ /**
* @param storageHandler
* the storage handler to set
*/
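The two-argument constructor is what DDLSemanticAnalyzer above uses to route the statement: it stores the qualified table name in oldName and sets op to DROPCONSTRAINT. A short sketch of that wiring (placeholder names):

    // Sketch: mirrors analyzeAlterTableDropConstraint above.
    AlterTableDesc desc = new AlterTableDesc("default.table1", "pk1");
    assert desc.getOp() == AlterTableDesc.AlterTableTypes.DROPCONSTRAINT;
    assert "pk1".equals(desc.getConstraintName());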
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 188cd6f..e651016 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -115,6 +115,8 @@ public enum HiveOperation {
new Privilege[] {Privilege.ALTER_DATA}, null),
ALTERTABLE_PARTCOLTYPE("ALTERTABLE_PARTCOLTYPE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }),
ALTERTABLE_EXCHANGEPARTITION("ALTERTABLE_EXCHANGEPARTITION", null, null),
+ ALTERTABLE_DROPCONSTRAINT("ALTERTABLE_DROPCONSTRAINT",
+ new Privilege[]{Privilege.ALTER_METADATA}, null),
ALTERVIEW_RENAME("ALTERVIEW_RENAME", new Privilege[] {Privilege.ALTER_METADATA}, null),
ALTERVIEW_AS("ALTERVIEW_AS", new Privilege[] {Privilege.ALTER_METADATA}, null),
ALTERTABLE_COMPACT("ALTERTABLE_COMPACT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA}),
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q
new file mode 100644
index 0000000..2055f9e
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint1.q
@@ -0,0 +1,3 @@
+CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
+ALTER TABLE table1 DROP CONSTRAINT pk1;
+ALTER TABLE table1 DROP CONSTRAINT pk1;
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q
new file mode 100644
index 0000000..d253617
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint2.q
@@ -0,0 +1,2 @@
+CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
+ALTER TABLE table1 DROP CONSTRAINT pk1;
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q
new file mode 100644
index 0000000..04eb1fb
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint3.q
@@ -0,0 +1,2 @@
+CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
+ALTER TABLE table2 DROP CONSTRAINT pk2;
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q b/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q
new file mode 100644
index 0000000..3cf2d2a
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/drop_invalid_constraint4.q
@@ -0,0 +1,3 @@
+CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
+CREATE TABLE table2 (a STRING, b STRING, constraint pk2 primary key (a) disable novalidate);
+ALTER TABLE table1 DROP CONSTRAINT pk2;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/queries/clientpositive/create_with_constraints.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/create_with_constraints.q b/ql/src/test/queries/clientpositive/create_with_constraints.q
index eef0c64..0bb92e4 100644
--- a/ql/src/test/queries/clientpositive/create_with_constraints.q
+++ b/ql/src/test/queries/clientpositive/create_with_constraints.q
@@ -8,5 +8,17 @@ CREATE TABLE table6 (x string, y string, PRIMARY KEY (x) disable novalidate, FOR
CONSTRAINT fk4 FOREIGN KEY (y) REFERENCES table1(a) DISABLE NOVALIDATE);
CREATE TABLE table7 (a STRING, b STRING, primary key (a) disable novalidate rely);
CREATE TABLE table8 (a STRING, b STRING, constraint pk8 primary key (a) disable novalidate norely);
+CREATE TABLE table9 (a STRING, b STRING, primary key (a, b) disable novalidate rely);
+CREATE TABLE table10 (a STRING, b STRING, constraint pk10 primary key (a) disable novalidate norely, foreign key (a, b) references table9(a, b) disable novalidate);
+CREATE TABLE table11 (a STRING, b STRING, c STRING, constraint pk11 primary key (a) disable novalidate rely, foreign key (a, b) references table9(a, b) disable novalidate,
+foreign key (c) references table4(x) disable novalidate);
+ALTER TABLE table2 DROP CONSTRAINT pk1;
+ALTER TABLE table3 DROP CONSTRAINT fk1;
+ALTER TABLE table6 DROP CONSTRAINT fk4;
+CREATE DATABASE dbconstraint;
+USE dbconstraint;
+CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate);
+USE default;
+ALTER TABLE dbconstraint.table2 DROP CONSTRAINT pk1;
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
new file mode 100644
index 0000000..4568ccb
--- /dev/null
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
@@ -0,0 +1,15 @@
+PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table1
+POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table1
+PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+POSTHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
+POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
+PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
new file mode 100644
index 0000000..0051131
--- /dev/null
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table2
+POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table2
+PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
new file mode 100644
index 0000000..9c60e94
--- /dev/null
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table2
+POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table2
+PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk2
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table2)
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
new file mode 100644
index 0000000..1d93c42
--- /dev/null
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
@@ -0,0 +1,19 @@
+PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table1
+POSTHOOK: query: CREATE TABLE table1 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table1
+PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk2 primary key (a) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table2
+POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk2 primary key (a) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table2
+PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk2
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table1)
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/ql/src/test/results/clientpositive/create_with_constraints.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_with_constraints.q.out b/ql/src/test/results/clientpositive/create_with_constraints.q.out
index 5cf8d83..7a7a50a 100644
--- a/ql/src/test/results/clientpositive/create_with_constraints.q.out
+++ b/ql/src/test/results/clientpositive/create_with_constraints.q.out
@@ -66,3 +66,71 @@ POSTHOOK: query: CREATE TABLE table8 (a STRING, b STRING, constraint pk8 primary
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@table8
+PREHOOK: query: CREATE TABLE table9 (a STRING, b STRING, primary key (a, b) disable novalidate rely)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table9
+POSTHOOK: query: CREATE TABLE table9 (a STRING, b STRING, primary key (a, b) disable novalidate rely)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table9
+PREHOOK: query: CREATE TABLE table10 (a STRING, b STRING, constraint pk10 primary key (a) disable novalidate norely, foreign key (a, b) references table9(a, b) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table10
+POSTHOOK: query: CREATE TABLE table10 (a STRING, b STRING, constraint pk10 primary key (a) disable novalidate norely, foreign key (a, b) references table9(a, b) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table10
+PREHOOK: query: CREATE TABLE table11 (a STRING, b STRING, c STRING, constraint pk11 primary key (a) disable novalidate rely, foreign key (a, b) references table9(a, b) disable novalidate,
+foreign key (c) references table4(x) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table11
+POSTHOOK: query: CREATE TABLE table11 (a STRING, b STRING, c STRING, constraint pk11 primary key (a) disable novalidate rely, foreign key (a, b) references table9(a, b) disable novalidate,
+foreign key (c) references table4(x) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table11
+PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk1
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+POSTHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk1
+POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
+PREHOOK: query: ALTER TABLE table3 DROP CONSTRAINT fk1
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+POSTHOOK: query: ALTER TABLE table3 DROP CONSTRAINT fk1
+POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
+PREHOOK: query: ALTER TABLE table6 DROP CONSTRAINT fk4
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+POSTHOOK: query: ALTER TABLE table6 DROP CONSTRAINT fk4
+POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
+PREHOOK: query: CREATE DATABASE dbconstraint
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:dbconstraint
+POSTHOOK: query: CREATE DATABASE dbconstraint
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:dbconstraint
+PREHOOK: query: USE dbconstraint
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:dbconstraint
+POSTHOOK: query: USE dbconstraint
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:dbconstraint
+PREHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:dbconstraint
+PREHOOK: Output: dbconstraint@table2
+POSTHOOK: query: CREATE TABLE table2 (a STRING, b STRING, constraint pk1 primary key (a) disable novalidate)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:dbconstraint
+POSTHOOK: Output: dbconstraint@table2
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: ALTER TABLE dbconstraint.table2 DROP CONSTRAINT pk1
+PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
+POSTHOOK: query: ALTER TABLE dbconstraint.table2 DROP CONSTRAINT pk1
+POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/service/src/gen/thrift/gen-py/__init__.py
----------------------------------------------------------------------
diff --git a/service/src/gen/thrift/gen-py/__init__.py b/service/src/gen/thrift/gen-py/__init__.py
deleted file mode 100644
index e69de29..0000000
[10/20] hive git commit: HIVE-13669: LLAP: io.enabled config is ignored on the server side (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Posted by jd...@apache.org.
HIVE-13669: LLAP: io.enabled config is ignored on the server side (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/652f88ad
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/652f88ad
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/652f88ad
Branch: refs/heads/llap
Commit: 652f88ad973ebe1668b5663617259795cc007953
Parents: 212077b
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed May 4 14:55:01 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Wed May 4 14:55:01 2016 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/652f88ad/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index d23a44a..e662de9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -322,8 +322,9 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
fnLocalizer.init();
fnLocalizer.startLocalizeAllFunctions();
}
- LlapProxy.initializeLlapIo(conf);
-
+ if (isIoEnabled()) {
+ LlapProxy.initializeLlapIo(conf);
+ }
}
@Override
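In short, the fix stops LlapDaemon from initializing LLAP IO unconditionally and consults the io.enabled setting first. Below is a self-contained sketch of that guard pattern; the key name "hive.llap.io.enabled" and the config lookup are assumptions for illustration, since the diff above only shows the call site.

import java.util.Map;

public class IoEnabledGuardSketch {
    // Stand-in for the daemon's isIoEnabled(); the key name is an assumption.
    static boolean isIoEnabled(Map<String, String> conf) {
        return Boolean.parseBoolean(conf.getOrDefault("hive.llap.io.enabled", "false"));
    }

    static void initializeLlapIo() {
        System.out.println("LLAP IO initialized");
    }

    public static void main(String[] args) {
        Map<String, String> conf = Map.of("hive.llap.io.enabled", "false");
        if (isIoEnabled(conf)) {   // the guard added by this patch
            initializeLlapIo();    // no longer reached when IO is disabled
        }
    }
}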
[17/20] hive git commit: HIVE-13639: CBO rule to pull up constants through Union (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/09271872/ql/src/test/results/clientpositive/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_view.q.out b/ql/src/test/results/clientpositive/union_view.q.out
index badd209..530739e 100644
--- a/ql/src/test/results/clientpositive/union_view.q.out
+++ b/ql/src/test/results/clientpositive/union_view.q.out
@@ -358,12 +358,12 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '1' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -382,12 +382,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '1' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -406,12 +406,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '1' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '1' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -471,12 +471,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '2' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '2' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -495,12 +495,12 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '2' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '2' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -519,12 +519,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '2' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '2' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -584,12 +584,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '3' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -608,12 +608,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '3' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -632,12 +632,12 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '3' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '3' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -701,12 +701,12 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string), ds (type: string)
- outputColumnNames: _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: string), _col2 (type: string)
+ expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col1, _col2
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -723,12 +723,12 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string), ds (type: string)
- outputColumnNames: _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: string), _col2 (type: string)
+ expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col1, _col2
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -745,12 +745,12 @@ STAGE PLANS:
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string), ds (type: string)
- outputColumnNames: _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: string), _col2 (type: string)
+ expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col1, _col2
Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -1226,12 +1226,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '4' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '4' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -1250,12 +1250,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Union
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '4' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '4' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -1274,12 +1274,12 @@ STAGE PLANS:
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: value (type: string)
- outputColumnNames: _col1
+ outputColumnNames: _col0
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Union
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: 86 (type: int), _col1 (type: string), '4' (type: string)
+ expressions: 86 (type: int), _col0 (type: string), '4' (type: string)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 252 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
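The plan diffs above all follow one shape: as the renames from _col1 to _col0 suggest, after HIVE-13639 each Union branch schema carries only the varying column(s), while the constants (86 and the partition tag) are projected once in the Select above the Union. A rough, self-contained analogy of that rewrite follows, with plain Java streams standing in for the operator tree; this is an illustration of the idea, not Hive code.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ConstantPullUpSketch {
    public static void main(String[] args) {
        // Each "branch" emits only the varying column...
        Stream<String> branch1 = Stream.of("val_238", "val_86");
        Stream<String> branch2 = Stream.of("val_311");

        // ...and the constants (86 and the tag '1') are projected once,
        // above the union, instead of inside every branch.
        List<String> rows = Stream.concat(branch1, branch2)
            .map(v -> 86 + "\t" + v + "\t" + "1")
            .collect(Collectors.toList());
        rows.forEach(System.out::println);
    }
}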
[05/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index d392f67..3b3e05e 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -251,6 +251,8 @@ class ForeignKeysRequest;
class ForeignKeysResponse;
+class DropConstraintRequest;
+
class PartitionsByExprResult;
class PartitionsByExprRequest;
@@ -3779,6 +3781,56 @@ inline std::ostream& operator<<(std::ostream& out, const ForeignKeysResponse& ob
}
+class DropConstraintRequest {
+ public:
+
+ DropConstraintRequest(const DropConstraintRequest&);
+ DropConstraintRequest& operator=(const DropConstraintRequest&);
+ DropConstraintRequest() : dbname(), tablename(), constraintname() {
+ }
+
+ virtual ~DropConstraintRequest() throw();
+ std::string dbname;
+ std::string tablename;
+ std::string constraintname;
+
+ void __set_dbname(const std::string& val);
+
+ void __set_tablename(const std::string& val);
+
+ void __set_constraintname(const std::string& val);
+
+ bool operator == (const DropConstraintRequest & rhs) const
+ {
+ if (!(dbname == rhs.dbname))
+ return false;
+ if (!(tablename == rhs.tablename))
+ return false;
+ if (!(constraintname == rhs.constraintname))
+ return false;
+ return true;
+ }
+ bool operator != (const DropConstraintRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const DropConstraintRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(DropConstraintRequest &a, DropConstraintRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const DropConstraintRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
class PartitionsByExprResult {
public:
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java
new file mode 100644
index 0000000..4519dac
--- /dev/null
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java
@@ -0,0 +1,591 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class DropConstraintRequest implements org.apache.thrift.TBase<DropConstraintRequest, DropConstraintRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DropConstraintRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropConstraintRequest");
+
+ private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2);
+ private static final org.apache.thrift.protocol.TField CONSTRAINTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("constraintname", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new DropConstraintRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new DropConstraintRequestTupleSchemeFactory());
+ }
+
+ private String dbname; // required
+ private String tablename; // required
+ private String constraintname; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ DBNAME((short)1, "dbname"),
+ TABLENAME((short)2, "tablename"),
+ CONSTRAINTNAME((short)3, "constraintname");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // DBNAME
+ return DBNAME;
+ case 2: // TABLENAME
+ return TABLENAME;
+ case 3: // CONSTRAINTNAME
+ return CONSTRAINTNAME;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TABLENAME, new org.apache.thrift.meta_data.FieldMetaData("tablename", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.CONSTRAINTNAME, new org.apache.thrift.meta_data.FieldMetaData("constraintname", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropConstraintRequest.class, metaDataMap);
+ }
+
+ public DropConstraintRequest() {
+ }
+
+ public DropConstraintRequest(
+ String dbname,
+ String tablename,
+ String constraintname)
+ {
+ this();
+ this.dbname = dbname;
+ this.tablename = tablename;
+ this.constraintname = constraintname;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public DropConstraintRequest(DropConstraintRequest other) {
+ if (other.isSetDbname()) {
+ this.dbname = other.dbname;
+ }
+ if (other.isSetTablename()) {
+ this.tablename = other.tablename;
+ }
+ if (other.isSetConstraintname()) {
+ this.constraintname = other.constraintname;
+ }
+ }
+
+ public DropConstraintRequest deepCopy() {
+ return new DropConstraintRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.dbname = null;
+ this.tablename = null;
+ this.constraintname = null;
+ }
+
+ public String getDbname() {
+ return this.dbname;
+ }
+
+ public void setDbname(String dbname) {
+ this.dbname = dbname;
+ }
+
+ public void unsetDbname() {
+ this.dbname = null;
+ }
+
+ /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+ public boolean isSetDbname() {
+ return this.dbname != null;
+ }
+
+ public void setDbnameIsSet(boolean value) {
+ if (!value) {
+ this.dbname = null;
+ }
+ }
+
+ public String getTablename() {
+ return this.tablename;
+ }
+
+ public void setTablename(String tablename) {
+ this.tablename = tablename;
+ }
+
+ public void unsetTablename() {
+ this.tablename = null;
+ }
+
+ /** Returns true if field tablename is set (has been assigned a value) and false otherwise */
+ public boolean isSetTablename() {
+ return this.tablename != null;
+ }
+
+ public void setTablenameIsSet(boolean value) {
+ if (!value) {
+ this.tablename = null;
+ }
+ }
+
+ public String getConstraintname() {
+ return this.constraintname;
+ }
+
+ public void setConstraintname(String constraintname) {
+ this.constraintname = constraintname;
+ }
+
+ public void unsetConstraintname() {
+ this.constraintname = null;
+ }
+
+ /** Returns true if field constraintname is set (has been assigned a value) and false otherwise */
+ public boolean isSetConstraintname() {
+ return this.constraintname != null;
+ }
+
+ public void setConstraintnameIsSet(boolean value) {
+ if (!value) {
+ this.constraintname = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case DBNAME:
+ if (value == null) {
+ unsetDbname();
+ } else {
+ setDbname((String)value);
+ }
+ break;
+
+ case TABLENAME:
+ if (value == null) {
+ unsetTablename();
+ } else {
+ setTablename((String)value);
+ }
+ break;
+
+ case CONSTRAINTNAME:
+ if (value == null) {
+ unsetConstraintname();
+ } else {
+ setConstraintname((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case DBNAME:
+ return getDbname();
+
+ case TABLENAME:
+ return getTablename();
+
+ case CONSTRAINTNAME:
+ return getConstraintname();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case DBNAME:
+ return isSetDbname();
+ case TABLENAME:
+ return isSetTablename();
+ case CONSTRAINTNAME:
+ return isSetConstraintname();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof DropConstraintRequest)
+ return this.equals((DropConstraintRequest)that);
+ return false;
+ }
+
+ public boolean equals(DropConstraintRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_dbname = true && this.isSetDbname();
+ boolean that_present_dbname = true && that.isSetDbname();
+ if (this_present_dbname || that_present_dbname) {
+ if (!(this_present_dbname && that_present_dbname))
+ return false;
+ if (!this.dbname.equals(that.dbname))
+ return false;
+ }
+
+ boolean this_present_tablename = true && this.isSetTablename();
+ boolean that_present_tablename = true && that.isSetTablename();
+ if (this_present_tablename || that_present_tablename) {
+ if (!(this_present_tablename && that_present_tablename))
+ return false;
+ if (!this.tablename.equals(that.tablename))
+ return false;
+ }
+
+ boolean this_present_constraintname = true && this.isSetConstraintname();
+ boolean that_present_constraintname = true && that.isSetConstraintname();
+ if (this_present_constraintname || that_present_constraintname) {
+ if (!(this_present_constraintname && that_present_constraintname))
+ return false;
+ if (!this.constraintname.equals(that.constraintname))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_dbname = true && (isSetDbname());
+ list.add(present_dbname);
+ if (present_dbname)
+ list.add(dbname);
+
+ boolean present_tablename = true && (isSetTablename());
+ list.add(present_tablename);
+ if (present_tablename)
+ list.add(tablename);
+
+ boolean present_constraintname = true && (isSetConstraintname());
+ list.add(present_constraintname);
+ if (present_constraintname)
+ list.add(constraintname);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(DropConstraintRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetDbname()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetTablename()).compareTo(other.isSetTablename());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTablename()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename, other.tablename);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetConstraintname()).compareTo(other.isSetConstraintname());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetConstraintname()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.constraintname, other.constraintname);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("DropConstraintRequest(");
+ boolean first = true;
+
+ sb.append("dbname:");
+ if (this.dbname == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.dbname);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("tablename:");
+ if (this.tablename == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.tablename);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("constraintname:");
+ if (this.constraintname == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.constraintname);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ if (!isSetDbname()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbname' is unset! Struct:" + toString());
+ }
+
+ if (!isSetTablename()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablename' is unset! Struct:" + toString());
+ }
+
+ if (!isSetConstraintname()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'constraintname' is unset! Struct:" + toString());
+ }
+
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class DropConstraintRequestStandardSchemeFactory implements SchemeFactory {
+ public DropConstraintRequestStandardScheme getScheme() {
+ return new DropConstraintRequestStandardScheme();
+ }
+ }
+
+ private static class DropConstraintRequestStandardScheme extends StandardScheme<DropConstraintRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // DBNAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.dbname = iprot.readString();
+ struct.setDbnameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // TABLENAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.tablename = iprot.readString();
+ struct.setTablenameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 3: // CONSTRAINTNAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.constraintname = iprot.readString();
+ struct.setConstraintnameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.dbname != null) {
+ oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+ oprot.writeString(struct.dbname);
+ oprot.writeFieldEnd();
+ }
+ if (struct.tablename != null) {
+ oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
+ oprot.writeString(struct.tablename);
+ oprot.writeFieldEnd();
+ }
+ if (struct.constraintname != null) {
+ oprot.writeFieldBegin(CONSTRAINTNAME_FIELD_DESC);
+ oprot.writeString(struct.constraintname);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class DropConstraintRequestTupleSchemeFactory implements SchemeFactory {
+ public DropConstraintRequestTupleScheme getScheme() {
+ return new DropConstraintRequestTupleScheme();
+ }
+ }
+
+ private static class DropConstraintRequestTupleScheme extends TupleScheme<DropConstraintRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ oprot.writeString(struct.dbname);
+ oprot.writeString(struct.tablename);
+ oprot.writeString(struct.constraintname);
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ struct.dbname = iprot.readString();
+ struct.setDbnameIsSet(true);
+ struct.tablename = iprot.readString();
+ struct.setTablenameIsSet(true);
+ struct.constraintname = iprot.readString();
+ struct.setConstraintnameIsSet(true);
+ }
+ }
+
+}
+
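A quick usage note on the generated struct above: all three fields are REQUIRED, and validate() rejects a request with any of them unset. A hedged sketch, assuming the class is on the classpath via the hive-metastore artifact:

import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
import org.apache.thrift.TException;

public class DropConstraintRequestDemo {
    public static void main(String[] args) {
        // The three-argument constructor sets every required field.
        DropConstraintRequest req =
            new DropConstraintRequest("default", "table2", "pk1");
        try {
            req.validate(); // passes: dbname, tablename, constraintname all set
        } catch (TException e) {
            throw new AssertionError("unexpected: " + e.getMessage());
        }
        // e.g. DropConstraintRequest(dbname:default, tablename:table2, constraintname:pk1)
        System.out.println(req);
    }
}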
[07/20] hive git commit: HIVE-13351: Support drop Primary Key/Foreign Key constraints (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 11d3322..990be15 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -41,6 +41,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
virtual void create_table(const Table& tbl) = 0;
virtual void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) = 0;
virtual void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys) = 0;
+ virtual void drop_constraint(const DropConstraintRequest& req) = 0;
virtual void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) = 0;
virtual void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) = 0;
virtual void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) = 0;
@@ -256,6 +257,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
void create_table_with_constraints(const Table& /* tbl */, const std::vector<SQLPrimaryKey> & /* primaryKeys */, const std::vector<SQLForeignKey> & /* foreignKeys */) {
return;
}
+ void drop_constraint(const DropConstraintRequest& /* req */) {
+ return;
+ }
void drop_table(const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */) {
return;
}
@@ -3032,6 +3036,118 @@ class ThriftHiveMetastore_create_table_with_constraints_presult {
};
+typedef struct _ThriftHiveMetastore_drop_constraint_args__isset {
+ _ThriftHiveMetastore_drop_constraint_args__isset() : req(false) {}
+ bool req :1;
+} _ThriftHiveMetastore_drop_constraint_args__isset;
+
+class ThriftHiveMetastore_drop_constraint_args {
+ public:
+
+ ThriftHiveMetastore_drop_constraint_args(const ThriftHiveMetastore_drop_constraint_args&);
+ ThriftHiveMetastore_drop_constraint_args& operator=(const ThriftHiveMetastore_drop_constraint_args&);
+ ThriftHiveMetastore_drop_constraint_args() {
+ }
+
+ virtual ~ThriftHiveMetastore_drop_constraint_args() throw();
+ DropConstraintRequest req;
+
+ _ThriftHiveMetastore_drop_constraint_args__isset __isset;
+
+ void __set_req(const DropConstraintRequest& val);
+
+ bool operator == (const ThriftHiveMetastore_drop_constraint_args & rhs) const
+ {
+ if (!(req == rhs.req))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_drop_constraint_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_drop_constraint_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_constraint_pargs {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_drop_constraint_pargs() throw();
+ const DropConstraintRequest* req;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_constraint_result__isset {
+ _ThriftHiveMetastore_drop_constraint_result__isset() : o1(false), o3(false) {}
+ bool o1 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_drop_constraint_result__isset;
+
+class ThriftHiveMetastore_drop_constraint_result {
+ public:
+
+ ThriftHiveMetastore_drop_constraint_result(const ThriftHiveMetastore_drop_constraint_result&);
+ ThriftHiveMetastore_drop_constraint_result& operator=(const ThriftHiveMetastore_drop_constraint_result&);
+ ThriftHiveMetastore_drop_constraint_result() {
+ }
+
+ virtual ~ThriftHiveMetastore_drop_constraint_result() throw();
+ NoSuchObjectException o1;
+ MetaException o3;
+
+ _ThriftHiveMetastore_drop_constraint_result__isset __isset;
+
+ void __set_o1(const NoSuchObjectException& val);
+
+ void __set_o3(const MetaException& val);
+
+ bool operator == (const ThriftHiveMetastore_drop_constraint_result & rhs) const
+ {
+ if (!(o1 == rhs.o1))
+ return false;
+ if (!(o3 == rhs.o3))
+ return false;
+ return true;
+ }
+ bool operator != (const ThriftHiveMetastore_drop_constraint_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ThriftHiveMetastore_drop_constraint_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_constraint_presult__isset {
+ _ThriftHiveMetastore_drop_constraint_presult__isset() : o1(false), o3(false) {}
+ bool o1 :1;
+ bool o3 :1;
+} _ThriftHiveMetastore_drop_constraint_presult__isset;
+
+class ThriftHiveMetastore_drop_constraint_presult {
+ public:
+
+
+ virtual ~ThriftHiveMetastore_drop_constraint_presult() throw();
+ NoSuchObjectException o1;
+ MetaException o3;
+
+ _ThriftHiveMetastore_drop_constraint_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
typedef struct _ThriftHiveMetastore_drop_table_args__isset {
_ThriftHiveMetastore_drop_table_args__isset() : dbname(false), name(false), deleteData(false) {}
bool dbname :1;
@@ -18851,6 +18967,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys);
void send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys);
void recv_create_table_with_constraints();
+ void drop_constraint(const DropConstraintRequest& req);
+ void send_drop_constraint(const DropConstraintRequest& req);
+ void recv_drop_constraint();
void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
void recv_drop_table();
@@ -19261,6 +19380,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
void process_create_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_create_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_create_table_with_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_drop_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_drop_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -19411,6 +19531,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
processMap_["create_table"] = &ThriftHiveMetastoreProcessor::process_create_table;
processMap_["create_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_create_table_with_environment_context;
processMap_["create_table_with_constraints"] = &ThriftHiveMetastoreProcessor::process_create_table_with_constraints;
+ processMap_["drop_constraint"] = &ThriftHiveMetastoreProcessor::process_drop_constraint;
processMap_["drop_table"] = &ThriftHiveMetastoreProcessor::process_drop_table;
processMap_["drop_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context;
processMap_["get_tables"] = &ThriftHiveMetastoreProcessor::process_get_tables;
@@ -19752,6 +19873,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
ifaces_[i]->create_table_with_constraints(tbl, primaryKeys, foreignKeys);
}
+ void drop_constraint(const DropConstraintRequest& req) {
+ size_t sz = ifaces_.size();
+ size_t i = 0;
+ for (; i < (sz - 1); ++i) {
+ ifaces_[i]->drop_constraint(req);
+ }
+ ifaces_[i]->drop_constraint(req);
+ }
+
void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) {
size_t sz = ifaces_.size();
size_t i = 0;
@@ -21045,6 +21175,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys);
int32_t send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys);
void recv_create_table_with_constraints(const int32_t seqid);
+ void drop_constraint(const DropConstraintRequest& req);
+ int32_t send_drop_constraint(const DropConstraintRequest& req);
+ void recv_drop_constraint(const int32_t seqid);
void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
int32_t send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
void recv_drop_table(const int32_t seqid);
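For completeness, the wire call itself is a plain request/response with NoSuchObjectException and MetaException as the declared failures. The Java counterpart of the C++ client declared above is the generated ThriftHiveMetastore.Client; a hedged sketch against an unsecured metastore endpoint follows (host, port, and the binary protocol choice are assumptions, and a secured deployment would additionally need SASL/Kerberos).

import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class DropConstraintCallSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint for a local, unsecured metastore.
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        try {
            // Mirrors the C++ drop_constraint(req) declared in the header above.
            client.drop_constraint(new DropConstraintRequest("default", "table2", "pk1"));
        } finally {
            transport.close();
        }
    }
}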
http://git-wip-us.apache.org/repos/asf/hive/blob/212077b8/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index fa87e34..2d13e77 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -117,6 +117,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
printf("create_table_with_constraints\n");
}
+ void drop_constraint(const DropConstraintRequest& req) {
+ // Your implementation goes here
+ printf("drop_constraint\n");
+ }
+
void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) {
// Your implementation goes here
printf("drop_table\n");