You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@kylin.apache.org by "Alexander (JIRA)" <ji...@apache.org> on 2019/06/10 17:12:00 UTC

[jira] [Created] (KYLIN-4038) Lookup tables are empty when built on ORC tables.

Alexander created KYLIN-4038:
--------------------------------

             Summary: Lookup tables are empty when built on ORC tables.
                 Key: KYLIN-4038
                 URL: https://issues.apache.org/jira/browse/KYLIN-4038
             Project: Kylin
          Issue Type: Bug
    Affects Versions: v2.6.2
         Environment: HDP 3 cluster.
            Reporter: Alexander


Create tables 
{code:java}
// Create tables
create table kylin_sales_sida (trans_id bigint,part_dt date,lstg_format_name string,leaf_categ_id bigint,lstg_site_id int,slr_segment_cd smallint,price decimal(19,4),item_count bigint,seller_id bigint,buyer_id bigint,ops_user_id string,ops_region string) stored as orc tblproperties ("orc.compress"="SNAPPY");

insert into table kylin_sales_sida select * from kylin_sales;

create table kylin_country_sida (country string,latitude double,longitude double,name string) stored as orc tblproperties ("orc.compress"="SNAPPY");

insert into table kylin_country_sida select * from kylin_country;

create table kylin_category_groupings_sida (leaf_categ_id bigint,leaf_categ_name string,site_id int,categ_busn_mgr string,categ_busn_unit string,regn_categ string,user_defined_field1 string,user_defined_field3 string,kylin_groupings_cre_date string,kylin_groupings_upd_date string,kylin_groupings_cre_user string,kylin_groupings_upd_user string,meta_categ_id decimal(10,0),meta_categ_name string,categ_lvl2_id decimal(10,0),categ_lvl3_id decimal(10,0),categ_lvl4_id decimal(10,0),categ_lvl5_id decimal(10,0),categ_lvl6_id decimal(10,0),categ_lvl7_id decimal(10,0),categ_lvl2_name string,categ_lvl3_name string,categ_lvl4_name string,categ_lvl5_name string,categ_lvl6_name string,categ_lvl7_name string,categ_flags decimal(10,0),adult_categ_yn string,domain_id decimal(10,0),user_defined_field5 string,vcs_id decimal(10,0),gcs_id decimal(10,0),move_to decimal(10,0),sap_category_id decimal(10,0),src_id tinyint,bsns_vrtcl_name string) stored as orc tblproperties ("orc.compress"="SNAPPY");

insert into table kylin_category_groupings_sida select * from kylin_category_groupings;

create table kylin_cal_dt_sida (cal_dt date,year_beg_dt date,qtr_beg_dt date,month_beg_dt date,week_beg_dt date,age_for_year_id smallint,age_for_qtr_id smallint,age_for_month_id smallint,age_for_week_id smallint,age_for_dt_id smallint,age_for_rtl_year_id smallint,age_for_rtl_qtr_id smallint,age_for_rtl_month_id smallint,age_for_rtl_week_id smallint,age_for_cs_week_id smallint,day_of_cal_id int,day_of_year_id smallint,day_of_qtr_id smallint,day_of_month_id smallint,day_of_week_id int,week_of_year_id tinyint,week_of_cal_id int,month_of_qtr_id tinyint,month_of_year_id tinyint,month_of_cal_id smallint,qtr_of_year_id tinyint,qtr_of_cal_id smallint,year_of_cal_id smallint,year_end_dt string,qtr_end_dt string,month_end_dt string,week_end_dt string,cal_dt_name string,cal_dt_desc string,cal_dt_short_name string,ytd_yn_id tinyint,qtd_yn_id tinyint,mtd_yn_id tinyint,wtd_yn_id tinyint,season_beg_dt string,day_in_year_count smallint,day_in_qtr_count tinyint,day_in_month_count tinyint,day_in_week_count tinyint,rtl_year_beg_dt string,rtl_qtr_beg_dt string,rtl_month_beg_dt string,rtl_week_beg_dt string,cs_week_beg_dt string,cal_date string,day_of_week string,month_id string,prd_desc string,prd_flag string,prd_id string,prd_ind string,qtr_desc string,qtr_id string,qtr_ind string,retail_week string,retail_year string,retail_start_date string,retail_wk_end_date string,week_ind string,week_num_desc string,week_beg_date string,week_end_date string,week_in_year_id string,week_id string,week_beg_end_desc_mdy string,week_beg_end_desc_md string,year_id string,year_ind string,cal_dt_mns_1year_dt string,cal_dt_mns_2year_dt string,cal_dt_mns_1qtr_dt string,cal_dt_mns_2qtr_dt string,cal_dt_mns_1month_dt string,cal_dt_mns_2month_dt string,cal_dt_mns_1week_dt string,cal_dt_mns_2week_dt string,curr_cal_dt_mns_1year_yn_id tinyint,curr_cal_dt_mns_2year_yn_id tinyint,curr_cal_dt_mns_1qtr_yn_id tinyint,curr_cal_dt_mns_2qtr_yn_id tinyint,curr_cal_dt_mns_1month_yn_id 
tinyint,curr_cal_dt_mns_2month_yn_id tinyint,curr_cal_dt_mns_1week_yn_ind tinyint,curr_cal_dt_mns_2week_yn_ind tinyint,rtl_month_of_rtl_year_id string,rtl_qtr_of_rtl_year_id tinyint,rtl_week_of_rtl_year_id tinyint,season_of_year_id tinyint,ytm_yn_id tinyint,ytq_yn_id tinyint,ytw_yn_id tinyint,kylin_cal_dt_cre_date string,kylin_cal_dt_cre_user string,kylin_cal_dt_upd_date string,kylin_cal_dt_upd_user string) stored as orc tblproperties ("orc.compress"="SNAPPY");

insert into table kylin_cal_dt_sida select * from kylin_cal_dt;

create table kylin_account_sida (account_id bigint,account_buyer_level int,account_seller_level int,account_country string,account_contact string) stored as orc tblproperties ("orc.compress"="SNAPPY");

insert into table kylin_account_sida select * from kylin_account;{code}
Create new project and data model, a manual copy of test cube learn_kylin. Add Lookup tables to HBase.

Build the cube, and notice that the lookup tables are built empty:
{code:java}
// Result of Convert Lookup Table to HFile: step
2019-06-06 11:12:23,172 DEBUG [Scheduler 1312561281 Job a501b13d-1026-8fbf-5695-80483044f23b-580] common.HadoopCmdOutput:100 : Counters: 39
File System Counters
FILE: Number of bytes read=0
FILE: Number of bytes written=323522
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=0
HDFS: Number of bytes written=0
HDFS: Number of read operations=1
HDFS: Number of large read operations=0
HDFS: Number of write operations=0
Job Counters
Launched reduce tasks=1
Total time spent by all reduces in occupied slots (ms)=21696
Total time spent by all reduce tasks (ms)=3616
Total vcore-milliseconds taken by all reduce tasks=3616
Total megabyte-milliseconds taken by all reduce tasks=22216704
Map-Reduce Framework
Combine input records=0
Combine output records=0
Reduce input groups=0
Reduce shuffle bytes=0
Reduce input records=0
Reduce output records=0
Spilled Records=0
Shuffled Maps =0
Failed Shuffles=0
Merged Map outputs=0
GC time elapsed (ms)=71
CPU time spent (ms)=840
Physical memory (bytes) snapshot=216272896
Virtual memory (bytes) snapshot=7290363904
Total committed heap usage (bytes)=191365120
Peak Reduce Physical memory (bytes)=216272896
Peak Reduce Virtual memory (bytes)=7290363904
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Output Format Counters
Bytes Written=0
2019-06-06 11:12:23,172 DEBUG [Scheduler 1312561281 Job a501b13d-1026-8fbf-5695-80483044f23b-580] common.HadoopCmdOutput:107 : outputFolder is hdfs://********:8020/kylin/kylin_metadata/kylin-a501b13d-1026-8fbf-5695-80483044f23b/DEFAULT.KYLIN_ACCOUNT_SIDA/hfile
2019-06-06 11:12:23,173 DEBUG [Scheduler 1312561281 Job a501b13d-1026-8fbf-5695-80483044f23b-580] common.HadoopCmdOutput:112 : Seems no counter found for hdfs
2019-06-06 11:12:23,202 INFO [Scheduler 1312561281 Job a501b13d-1026-8fbf-5695-80483044f23b-580] execution.ExecutableManager:453 : job id:a501b13d-1026-8fbf-5695-80483044f23b-05 from RUNNING to SUCCEED
{code}
As a result, any query involving lookup table data returns incorrect results, since that data is missing.

(In Build Dimension Dictionary step the same thing, empty lookup data goes into cube calculation)

If I run a major compaction for each table:
{code:java}
// Compact Major on orc tables
ALTER TABLE DEFAULT.KYLIN_ACCOUNT_SIDA COMPACT 'MAJOR';
... ALTER all tables{code}
After that, I can rerun the cube build. The result will be an error in the MR job:
{code:java}
// MR Exception
2019-06-07 11:51:54,482 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] orc.OrcInputFormat:1737 : ORC pushdown predicate: null
2019-06-07 11:51:55,052 INFO [ORC_GET_SPLITS #7] impl.OrcCodecPool:56 : Got brand-new codec SNAPPY
2019-06-07 11:51:55,263 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] orc.OrcInputFormat:1855 : FooterCacheHitRatio: 0/1
2019-06-07 11:51:55,297 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] mapreduce.JobSubmitter:202 : number of splits:1
2019-06-07 11:51:55,329 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] mapreduce.JobSubmitter:298 : Submitting tokens for job: job_1559213270190_0372
2019-06-07 11:51:55,329 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] mapreduce.JobSubmitter:299 : Executing with tokens: []
2019-06-07 11:51:55,572 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] impl.YarnClientImpl:306 : Submitted application application_1559213270190_0372
2019-06-07 11:51:55,575 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] mapreduce.Job:1574 : The url to track the job: http://******:8088/proxy/application_1559213270190_0372/
2019-06-07 11:51:58,408 INFO [FetcherRunner 833282134-77] threadpool.DefaultFetcherRunner:94 : Job Fetcher: 1 should running, 1 actual running, 0 stopped, 0 ready, 229 already succeed, 61 error, 0 discarded, 1 others
2019-06-07 11:52:28,408 INFO [FetcherRunner 833282134-77] threadpool.DefaultFetcherRunner:94 : Job Fetcher: 1 should running, 1 actual running, 0 stopped, 0 ready, 229 already succeed, 61 error, 0 discarded, 1 others
2019-06-07 11:52:31,361 INFO [BadQueryDetector] service.BadQueryDetector:147 : Detect bad query.
2019-06-07 11:52:58,407 INFO [FetcherRunner 833282134-77] threadpool.DefaultFetcherRunner:94 : Job Fetcher: 1 should running, 1 actual running, 0 stopped, 0 ready, 229 already succeed, 61 error, 0 discarded, 1 others
2019-06-07 11:53:05,805 DEBUG [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] common.HadoopCmdOutput:100 : Counters: 10
Job Counters
Failed map tasks=4
Killed reduce tasks=1
Launched map tasks=4
Other local map tasks=3
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=136272
Total time spent by all reduces in occupied slots (ms)=0
Total time spent by all map tasks (ms)=45424
Total vcore-milliseconds taken by all map tasks=45424
Total megabyte-milliseconds taken by all map tasks=139542528
2019-06-07 11:53:05,806 DEBUG [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] common.HadoopCmdOutput:107 : outputFolder is hdfs://apachai1.apm.local:8020/kylin/kylin_metadata/kylin-6d0f79da-7902-1b2c-a49e-fa6b6190911c/DEFAULT.KYLIN_ACCOUNT_SIDA/hfile
2019-06-07 11:53:05,807 DEBUG [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] common.HadoopCmdOutput:112 : Seems no counter found for hdfs
2019-06-07 11:53:05,808 WARN [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] common.HadoopCmdOutput:119 : Job Diagnostics:Task failed task_1559213270190_0372_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0

2019-06-07 11:53:05,994 WARN [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] common.HadoopCmdOutput:124 : Failure task Diagnostics:
2019-06-07 11:53:05,994 WARN [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] common.HadoopCmdOutput:127 : Error: java.lang.ClassCastException: org.apache.hadoop.io.IntWritable cannot be cast to org.apache.hadoop.io.LongWritable
at org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector.getPrimitiveJavaObject(WritableLongObjectInspector.java:46)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializePrimitiveField(HCatRecordSerDe.java:278)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializeField(HCatRecordSerDe.java:199)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:53)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:97)
at org.apache.hive.hcatalog.mapreduce.HCatRecordReader.nextKeyValue(HCatRecordReader.java:204)
at org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.nextKeyValue(MapTask.java:568)
at org.apache.hadoop.mapreduce.task.MapContextImpl.nextKeyValue(MapContextImpl.java:80)
at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.nextKeyValue(WrappedMapper.java:91)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:799)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:347)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:174)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:168)

2019-06-07 11:53:06,004 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] execution.ExecutableManager:453 : job id:6d0f79da-7902-1b2c-a49e-fa6b6190911c-05 from RUNNING to ERROR
2019-06-07 11:53:06,005 ERROR [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] execution.AbstractExecutable:167 : error running Executable: CubingJob{id=6d0f79da-7902-1b2c-a49e-fa6b6190911c, name=BUILD CUBE - kylin_sales_cube_orc - 20120101000000_20140101000000 - MSK 2019-06-07 11:36:49, state=RUNNING}
2019-06-07 11:53:06,008 DEBUG [pool-7-thread-1] cachesync.Broadcaster:116 : Servers in the cluster: [localhost:7070]
2019-06-07 11:53:06,009 DEBUG [pool-7-thread-1] cachesync.Broadcaster:126 : Announcing new broadcast to all: BroadcastEvent{entity=execute_output, event=update, cacheKey=6d0f79da-7902-1b2c-a49e-fa6b6190911c}
2019-06-07 11:53:06,012 INFO [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] execution.ExecutableManager:453 : job id:6d0f79da-7902-1b2c-a49e-fa6b6190911c from RUNNING to ERROR
2019-06-07 11:53:06,012 DEBUG [pool-7-thread-1] cachesync.Broadcaster:116 : Servers in the cluster: [localhost:7070]
2019-06-07 11:53:06,012 DEBUG [http-nio-7070-exec-1] cachesync.Broadcaster:246 : Broadcasting UPDATE, execute_output, 6d0f79da-7902-1b2c-a49e-fa6b6190911c
2019-06-07 11:53:06,012 DEBUG [Scheduler 1312561281 Job 6d0f79da-7902-1b2c-a49e-fa6b6190911c-580] execution.AbstractExecutable:318 : no need to send email, user list is empty
2019-06-07 11:53:06,012 DEBUG [pool-7-thread-1] cachesync.Broadcaster:126 : Announcing new broadcast to all: BroadcastEvent{entity=execute_output, event=update, cacheKey=6d0f79da-7902-1b2c-a49e-fa6b6190911c}
2019-06-07 11:53:06,013 DEBUG [http-nio-7070-exec-1] cachesync.Broadcaster:280 : Done broadcasting UPDATE, execute_output, 6d0f79da-7902-1b2c-a49e-fa6b6190911c
2019-06-07 11:53:06,014 ERROR [pool-11-thread-10] threadpool.DefaultScheduler:116 : ExecuteException job:6d0f79da-7902-1b2c-a49e-fa6b6190911c
org.apache.kylin.job.exception.ExecuteException: org.apache.kylin.job.exception.ExecuteException: org.apache.kylin.engine.mr.exception.MapReduceException: Counters: 10
Job Counters
Failed map tasks=4
Killed reduce tasks=1
Launched map tasks=4
Other local map tasks=3
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=136272
Total time spent by all reduces in occupied slots (ms)=0
Total time spent by all map tasks (ms)=45424
Total vcore-milliseconds taken by all map tasks=45424
Total megabyte-milliseconds taken by all map tasks=139542528
Job Diagnostics:Task failed task_1559213270190_0372_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0

Failure task Diagnostics:
Error: java.lang.ClassCastException: org.apache.hadoop.io.IntWritable cannot be cast to org.apache.hadoop.io.LongWritable
at org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector.getPrimitiveJavaObject(WritableLongObjectInspector.java:46)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializePrimitiveField(HCatRecordSerDe.java:278)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializeField(HCatRecordSerDe.java:199)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:53)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:97)
at org.apache.hive.hcatalog.mapreduce.HCatRecordReader.nextKeyValue(HCatRecordReader.java:204)
at org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.nextKeyValue(MapTask.java:568)
at org.apache.hadoop.mapreduce.task.MapContextImpl.nextKeyValue(MapContextImpl.java:80)
at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.nextKeyValue(WrappedMapper.java:91)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:799)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:347)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:174)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:168)


at org.apache.kylin.job.execution.AbstractExecutable.execute(AbstractExecutable.java:180)
at org.apache.kylin.job.impl.threadpool.DefaultScheduler$JobRunner.run(DefaultScheduler.java:114)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.kylin.job.exception.ExecuteException: org.apache.kylin.engine.mr.exception.MapReduceException: Counters: 10
Job Counters
Failed map tasks=4
Killed reduce tasks=1
Launched map tasks=4
Other local map tasks=3
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=136272
Total time spent by all reduces in occupied slots (ms)=0
Total time spent by all map tasks (ms)=45424
Total vcore-milliseconds taken by all map tasks=45424
Total megabyte-milliseconds taken by all map tasks=139542528
Job Diagnostics:Task failed task_1559213270190_0372_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0

Failure task Diagnostics:
Error: java.lang.ClassCastException: org.apache.hadoop.io.IntWritable cannot be cast to org.apache.hadoop.io.LongWritable
at org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector.getPrimitiveJavaObject(WritableLongObjectInspector.java:46)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializePrimitiveField(HCatRecordSerDe.java:278)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializeField(HCatRecordSerDe.java:199)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:53)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:97)
at org.apache.hive.hcatalog.mapreduce.HCatRecordReader.nextKeyValue(HCatRecordReader.java:204)
at org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.nextKeyValue(MapTask.java:568)
at org.apache.hadoop.mapreduce.task.MapContextImpl.nextKeyValue(MapContextImpl.java:80)
at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.nextKeyValue(WrappedMapper.java:91)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:799)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:347)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:174)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:168)


at org.apache.kylin.job.execution.AbstractExecutable.execute(AbstractExecutable.java:180)
at org.apache.kylin.job.execution.DefaultChainedExecutable.doWork(DefaultChainedExecutable.java:70)
at org.apache.kylin.job.execution.AbstractExecutable.execute(AbstractExecutable.java:165)
... 4 more
Caused by: org.apache.kylin.engine.mr.exception.MapReduceException: Counters: 10
Job Counters
Failed map tasks=4
Killed reduce tasks=1
Launched map tasks=4
Other local map tasks=3
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=136272
Total time spent by all reduces in occupied slots (ms)=0
Total time spent by all map tasks (ms)=45424
Total vcore-milliseconds taken by all map tasks=45424
Total megabyte-milliseconds taken by all map tasks=139542528
Job Diagnostics:Task failed task_1559213270190_0372_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0

Failure task Diagnostics:
Error: java.lang.ClassCastException: org.apache.hadoop.io.IntWritable cannot be cast to org.apache.hadoop.io.LongWritable
at org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableLongObjectInspector.getPrimitiveJavaObject(WritableLongObjectInspector.java:46)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializePrimitiveField(HCatRecordSerDe.java:278)
at org.apache.hive.hcatalog.data.HCatRecordSerDe.serializeField(HCatRecordSerDe.java:199)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:53)
at org.apache.hive.hcatalog.data.LazyHCatRecord.get(LazyHCatRecord.java:97)
at org.apache.hive.hcatalog.mapreduce.HCatRecordReader.nextKeyValue(HCatRecordReader.java:204)
at org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.nextKeyValue(MapTask.java:568)
at org.apache.hadoop.mapreduce.task.MapContextImpl.nextKeyValue(MapContextImpl.java:80)
at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.nextKeyValue(WrappedMapper.java:91)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:799)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:347)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:174)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:168)


at org.apache.kylin.engine.mr.common.MapReduceExecutable.doWork(MapReduceExecutable.java:173)
at org.apache.kylin.job.execution.AbstractExecutable.execute(AbstractExecutable.java:165)
... 6 more

{code}
Looks like the problem is in HCatalog, which is used by Kylin to access Hive tables... Please help with finding a solution.

 



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)