Posted to issues@carbondata.apache.org by "Jonathan.Wei (JIRA)" <ji...@apache.org> on 2018/07/20 01:33:00 UTC

[jira] [Created] (CARBONDATA-2761) [MV] Creating an MV datamap over a join and then querying the joined tables throws org.apache.hive.service.cli.HiveSQLException: java.lang.StackOverflowError

Jonathan.Wei created CARBONDATA-2761:
----------------------------------------

             Summary: [MV] Creating an MV datamap over a join and then querying the joined tables throws org.apache.hive.service.cli.HiveSQLException: java.lang.StackOverflowError
                 Key: CARBONDATA-2761
                 URL: https://issues.apache.org/jira/browse/CARBONDATA-2761
             Project: CarbonData
          Issue Type: Bug
    Affects Versions: 1.4.0
         Environment: 5-node CentOS 7 cluster, 12-core CPU, 32 GB memory
            Reporter: Jonathan.Wei


CREATE TABLE CarbonOrders (
CustomerID STRING,
EmployeeID INT,
OrderDate STRING,
RequiredDate STRING,
ShippedDate STRING,
ShipVia INT,
Freight INT,
ShipName STRING,
ShipAddress STRING,
ShipCity STRING,
ShipRegion STRING,
ShipPostalCode STRING,
ShipCountry STRING,
property1 STRING,
property2 STRING,
property3 STRING,
property4 STRING,
property5 STRING,
property6 STRING,
property7 STRING,
property8 STRING,
property9 STRING,
property10 STRING,
property11 STRING,
property12 STRING,
property13 STRING,
property14 STRING,
property15 STRING,
property16 STRING,
property17 STRING,
property18 STRING,
property19 STRING,
property20 STRING,
property21 STRING,
property22 STRING,
property23 STRING,
property24 STRING,
property25 STRING,
property26 STRING,
property27 STRING,
property28 STRING,
property29 STRING,
property30 STRING,
property31 STRING,
property32 STRING,
property33 STRING,
property34 STRING,
property35 STRING,
property36 STRING,
property37 STRING,
property38 STRING,
property39 STRING,
property40 STRING)
PARTITIONED BY(OrderID INT)
STORED BY 'carbondata' TBLPROPERTIES(
'PARTITION_TYPE'='hash',
'NUM_PARTITIONS'='6',
'SORT_COLUMNS'='OrderID,CustomerID,EmployeeID,ShipName,ShipAddress,property38,property39,property40',
'DICTIONARY_INCLUDE'='EmployeeID,property38,property39,property40',
'NO_INVERTED_INDEX'='OrderID,CustomerID',
'MAJOR_COMPACTION_SIZE'='2048',
'AUTO_LOAD_MERGE'='true',
'COMPACTION_LEVEL_THRESHOLD'='5,6',
'COMPACTION_PRESERVE_SEGMENTS'='10',
'ALLOWED_COMPACTION_DAYS'='5',
'TABLE_BLOCKSIZE'='256',
'BUCKETNUMBER'='10',
'BUCKETCOLUMNS'='CustomerID');


CREATE TABLE CarbonEmployees(
EmployeeID INT,
LastName STRING,
FirstName STRING,
Title STRING,
TitleOfCourtesy STRING,
BirthDate STRING,
HireDate STRING,
Address STRING,
City STRING,
Region STRING,
PostalCode STRING,
Country STRING,
HomePhone STRING,
Extension STRING,
Photo STRING,
Notes STRING,
ReportsTo INT,
PhotoPath STRING)
STORED BY 'carbondata' TBLPROPERTIES(
'DICTIONARY_INCLUDE'='EmployeeID,City,Region',
'SORT_COLUMNS'='EmployeeID,City,Region',
'MAJOR_COMPACTION_SIZE'='2048',
'AUTO_LOAD_MERGE'='true',
'COMPACTION_LEVEL_THRESHOLD'='5,6',
'COMPACTION_PRESERVE_SEGMENTS'='10',
'ALLOWED_COMPACTION_DAYS'='5');
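Both tables must contain data before the MV datamap is built. A minimal load sketch with placeholder HDFS paths and options (the actual files and load options are not part of this report):

-- hypothetical CSV files; adjust paths and options to the real data
LOAD DATA INPATH 'hdfs:///tmp/orders.csv' INTO TABLE carbonorders
OPTIONS('DELIMITER'=',', 'HEADER'='true');

LOAD DATA INPATH 'hdfs:///tmp/employees.csv' INTO TABLE carbonemployees
OPTIONS('DELIMITER'=',', 'HEADER'='true');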


CREATE DATAMAP TWOTABLE_GROUP USING 'mv' AS
SELECT od.EmployeeID, od.CustomerID
FROM carbonorders AS od
INNER JOIN carbonemployees AS e ON od.EmployeeID = e.EmployeeID
GROUP BY od.EmployeeID, od.CustomerID;

REBUILD DATAMAP TWOTABLE_GROUP;
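The datamap registration can be verified before querying; a sketch using CarbonData's datamap listing command (assuming carbonorders is treated as the parent table):

-- list datamaps registered on the fact table
SHOW DATAMAP ON TABLE carbonorders;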


0: jdbc:hive2://192.168.2.37:10000> explain select od.EmployeeID,od.CustomerID from carbonorders as od inner join carbonemployees as e on od.EmployeeID=e.EmployeeID group by od.EmployeeID,od.CustomerID limit 10;
Error: java.lang.StackOverflowError (state=,code=0)
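A possible temporary workaround, until the MV rewrite is fixed, is to drop the datamap so the planner no longer attempts the rewrite (a sketch; the exact DROP syntax for MV datamaps in 1.4.0 may differ):

-- drop the MV datamap; the join query should then run without rewrite
DROP DATAMAP IF EXISTS TWOTABLE_GROUP;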


LOG:

18/07/20 09:20:08 INFO SparkExecuteStatementOperation: Running query 'select od.EmployeeID,od.CustomerID from carbonorders as od inner join carbonemployees as e on od.EmployeeID=e.EmployeeID group by od.EmployeeID,od.CustomerID limit 10' with ef9ff840-fffd-4a8a-8763-9f558f0cdbf9
18/07/20 09:20:08 INFO CarbonSparkSqlParser: Parsing command: select od.EmployeeID,od.CustomerID from pcarbonorders as od inner join carbonemployees as e on od.EmployeeID=e.EmployeeID group by od.EmployeeID,od.CustomerID limit 10
18/07/20 09:20:08 INFO CatalystSqlParser: Parsing command: array<string>
18/07/20 09:20:08 INFO CatalystSqlParser: Parsing command: array<string>
18/07/20 09:20:08 INFO CatalystSqlParser: Parsing command: array<string>
18/07/20 09:20:08 INFO CatalystSqlParser: Parsing command: array<string>
18/07/20 09:20:09 INFO TableInfo: pool-21-thread-4 Table block size not specified for default_carbonemployees. Therefore considering the default value 1024 MB
18/07/20 09:20:09 ERROR SparkExecuteStatementOperation: Error executing query, currentState RUNNING,
java.lang.StackOverflowError
 at org.apache.spark.sql.catalyst.expressions.AttributeMap.get(AttributeMap.scala:34)
 at org.apache.spark.sql.catalyst.expressions.AttributeMap.get(AttributeMap.scala:31)
 at scala.collection.MapLike$class.apply(MapLike.scala:140)
 at org.apache.spark.sql.catalyst.expressions.AttributeMap.apply(AttributeMap.scala:31)
 at org.apache.carbondata.mv.rewrite.SelectSelectGroupbyChildDelta$$anonfun$13.applyOrElse(DefaultMatchMaker.scala:616)
 at org.apache.carbondata.mv.rewrite.SelectSelectGroupbyChildDelta$$anonfun$13.applyOrElse(DefaultMatchMaker.scala:615)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
 at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
 at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:266)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
 at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
 at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
 at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)
 at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$2.apply(TreeNode.scala:274)

...

at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
 at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:274)
18/07/19 16:57:38 ERROR SparkExecuteStatementOperation: Error running hive query:
org.apache.hive.service.cli.HiveSQLException: java.lang.StackOverflowError
 at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:268)
 at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:174)
 at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:171)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:422)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698)
 at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:184)
 at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
 at java.util.concurrent.FutureTask.run(FutureTask.java:266)
 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
 at java.lang.Thread.run(Thread.java:748)

--
This message was sent by Atlassian JIRA
(v7.6.3#76005)