You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@carbondata.apache.org by 柯旭 <ku...@broadtech.com.cn> on 2016/08/02 06:51:59 UTC

issues: loading a local csv file causes an "input path does not exist" exception

I have a question: when I load a local file, an "input path does not exist: /root/294test.csv" exception occurs, but the local file "/root/294test.csv" does exist. I found that the input path points to an HDFS path regardless of whether or not I add the "file://" prefix — it always points to an HDFS path — yet "sc.textFile("file:///root/294test.csv")" works without any problem.


 I am using spark-shell with the master in local mode. My table has 112 fields, and the SQL is " cc.sql("LOAD DATA local inpath 'file:///root/294test.csv' into table  carbon.test2 options('FILEHEADER'='"+r2.trim().replace(" ","")+"')")", where r2 is the csv header. If I upload the csv file to HDFS, it can be loaded successfully.


log:
INFO  02-08 14:41:46,971 - main Property file path: /root/../../../conf/carbon.properties
INFO  02-08 14:41:46,971 - main ------Using Carbon.properties --------
INFO  02-08 14:41:46,972 - main {}
INFO  02-08 14:41:46,972 - main Query [LOAD DATA LOCAL INPATH 'FILE:///ROOT/294TEST.CSV' INTO TABLE  CARBON.TEST2 OPTIONS('FILEHEADER'='LENGTH,CITY,INTERFACE,XDR_ID,RAT,IMSI,IMEI,MSISDN,MACHINE_IP_ADD_TYPE,SGW_GGSN_IP_ADD,ENB_SGSN_IP_ADD,SGW_GGSN_PORT,ENB_SGSN_PORT,ENB_SGSN_GTP_TEID,SGW_GGSN_GTP_TEID,TAC,CELL_ID,APN,APP_TYPE_CODE,PROCEDURE_STARTTIME,PROCEDURE_ENDTIME,PROTOCOL_TYPE,APP_TYPE,APP_SUB_TYPE,APP_CONTENT,APP_STATUS,USER_IPV4,USER_IPV6,USER_PORT,L4_PROTOCAL,APP_SERVER_IP_IPV4,APP_SERVER_IP_IPV6,APP_SERVER_PORT,UL_DATA,DL_DATA,UL_IP_PACKET,DL_IP_PACKET,UP_TCP_DISORDER_NUM,DOWN_TCP_DISORDER_NUM,UP_TCP_RETRANS_NUM,DOWN_TCP_RETRANS_NUM,TCP_CREACTLINK_RESPONSE_DELAY,TCP_CREACTLINK_CONFIRM_DELAY,UL_IP_FRAG_PACKETS,DL_IP_FRAG_PACKETS,TCP_1STREQUEST_DELAY,TCP_1STREUEST_RESPONSE_DELAY,TCP_WINDOW_SIZE,TCP_MSS_SIZE,TCP_CREATELINK_TRYTIMES,TCP_LINK_STATUS,SESSION_FINISH_INDICATOR,ULRTTTIMES,DLRTTTIMES,ULRTTTOTALDELAY,DLRTTTOTALDELAY,ULZEROWINDOWTIMES,DLZEROWINDOWTIMES,ULZEROWINDOWTOTALTIME,DLZEROWINDOWTOTALTIME,SESSIONRESETINDICATION,SESSIONRESETDIRECTION,INTSID1,INTSID2,INTAPPID,INTKEYWORDTYPEID,VCKEYWORD,INTWRULEID,INTARULEID,INTKRULEID,UP_TCP_LOST_NUM,DOWN_TCP_LOST_NUM,VERSION,TRANSACTION_TYPE,HTTP_WAP_AFFAIR_STATUS,HTTP_1STACK_1STREQ_DELAY,HTTP_LASTPACKET_1STREQ_DELAY,HTTP_LASTACK_DELAY,HOST,URI,XONLINEHOST,USER_AGENT,HTTP_CONTENT_TYPE,REFER_URI,COOKIE,CONTENT_LENGTH,TARGET_BEHAVIOR,WTP_INTERRUPT_TYPE,WTP_INTERRUPT_CAUSE,TITLE,KEYWORD,BUSS_BEHAVIOR_FLAG,BUSS_FINISH_FLAG,BUSS_DELAY,BUSS_BROWSER,PORTAL_APP_SET,IMUSERNAME,IMSOFTWARE_VERSION,IMOPERATION_TYPE,IMCLIENT_TYPE,IMDELAY,IMACTION,IMRESULT,RELOCATIONURI,BUFFERTIME,ESTPLAYTIME,REALPLAYTIME,CODERATE,VIDEOSIZE,STREAMIND,ISRATEAVAIL,DL_RATE_DATA')]
INFO  02-08 14:41:48,434 - Successfully able to get the table metadata file lock
INFO  02-08 14:41:48,505 - main Initiating Direct Load for the Table : (carbon.test2)
INFO  02-08 14:41:48,575 - [Block Distribution]
INFO  02-08 14:41:48,576 - totalInputSpaceConsumed : 294033120 , defaultParallelism : 60
INFO  02-08 14:41:48,577 - mapreduce.input.fileinputformat.split.maxsize : 16777216
INFO  02-08 14:41:48,917 - Block broadcast_0 stored as values in memory (estimated size 258.4 KB, free 258.4 KB)
INFO  02-08 14:41:49,323 - Block broadcast_0_piece0 stored as bytes in memory (estimated size 22.8 KB, free 281.2 KB)
INFO  02-08 14:41:49,326 - Added broadcast_0_piece0 in memory on localhost:51768 (size: 22.8 KB, free: 4.1 GB)
INFO  02-08 14:41:49,331 - Created broadcast 0 from NewHadoopRDD at CarbonTextFile.scala:45
ERROR 02-08 14:41:49,418 - generate global dictionary failed
ERROR 02-08 14:41:49,419 - main 
org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: /root/294test.csv
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:321)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:264)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:385)
	at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:120)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1307)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
	at org.apache.spark.rdd.RDD.take(RDD.scala:1302)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine$lzycompute(CarbonCsvRelation.scala:175)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine(CarbonCsvRelation.scala:170)
	at com.databricks.spark.csv.CarbonCsvRelation.inferSchema(CarbonCsvRelation.scala:141)
	at com.databricks.spark.csv.CarbonCsvRelation.<init>(CarbonCsvRelation.scala:71)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:142)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:44)
	at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:158)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:119)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:109)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.loadDataFrame(GlobalDictionaryUtil.scala:375)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.generateGlobalDictionary(GlobalDictionaryUtil.scala:462)
	at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:1149)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
	at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130)
	at org.carbondata.spark.rdd.CarbonDataFrameRDD.<init>(CarbonDataFrameRDD.scala:23)
	at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:130)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:41)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:46)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:54)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC.<init>(<console>:56)
	at $line35.$read$$iwC$$iwC$$iwC.<init>(<console>:58)
	at $line35.$read$$iwC$$iwC.<init>(<console>:60)
	at $line35.$read$$iwC.<init>(<console>:62)
	at $line35.$read.<init>(<console>:64)
	at $line35.$read$.<init>(<console>:68)
	at $line35.$read$.<clinit>(<console>)
	at $line35.$eval$.<init>(<console>:7)
	at $line35.$eval$.<clinit>(<console>)
	at $line35.$eval.$print(<console>)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
	at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
	at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
	at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
	at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
	at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
	at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
	at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
	at org.apache.spark.repl.Main$.main(Main.scala:31)
	at org.apache.spark.repl.Main.main(Main.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
	at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
	at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
AUDIT 02-08 14:41:49,422 - [master][root][Thread-1]Dataload failure for carbon.test2. Please check the logs
INFO  02-08 14:41:49,424 - Table MetaData Unlocked Successfully after data load
org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: /root/294test.csv
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:321)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:264)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:385)
	at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:120)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1307)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
	at org.apache.spark.rdd.RDD.take(RDD.scala:1302)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine$lzycompute(CarbonCsvRelation.scala:175)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine(CarbonCsvRelation.scala:170)
	at com.databricks.spark.csv.CarbonCsvRelation.inferSchema(CarbonCsvRelation.scala:141)
	at com.databricks.spark.csv.CarbonCsvRelation.<init>(CarbonCsvRelation.scala:71)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:142)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:44)
	at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:158)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:119)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:109)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.loadDataFrame(GlobalDictionaryUtil.scala:375)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.generateGlobalDictionary(GlobalDictionaryUtil.scala:462)
	at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:1149)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
	at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130)
	at org.carbondata.spark.rdd.CarbonDataFrameRDD.<init>(CarbonDataFrameRDD.scala:23)
	at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:130)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:41)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:46)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
	at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:54)
	at $iwC$$iwC$$iwC$$iwC.<init>(<console>:56)
	at $iwC$$iwC$$iwC.<init>(<console>:58)
	at $iwC$$iwC.<init>(<console>:60)
	at $iwC.<init>(<console>:62)
	at <init>(<console>:64)
	at .<init>(<console>:68)
	at .<clinit>(<console>)
	at .<init>(<console>:7)
	at .<clinit>(<console>)
	at $print(<console>)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
	at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
	at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
	at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
	at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
	at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
	at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
	at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
	at org.apache.spark.repl.Main$.main(Main.scala:31)
	at org.apache.spark.repl.Main.main(Main.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
	at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
	at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)



 thanks a lot!

回复: issues: loading a local csv file causes an "input path does not exist" exception

Posted by caiqiang <qi...@qq.com>.
Hi,
You are loading local data in spark local mode.
Please check the carbon.properties path and comment out the property carbon.ddl.base.hdfs.url.


Regards,
David Caiqiang
------------------ 原始邮件 ------------------
发件人: "柯旭";<ku...@broadtech.com.cn>;
发送时间: 2016年8月2日(星期二) 下午2:51
收件人: "dev"<de...@carbondata.incubator.apache.org>; 

主题: issues: loading a local csv file causes an "input path does not exist" exception



I have a question: when I load a local file, an "input path does not exist: /root/294test.csv" exception occurs, but the local file "/root/294test.csv" does exist. I found that the input path points to an HDFS path regardless of whether or not I add the "file://" prefix — it always points to an HDFS path — yet "sc.textFile("file:///root/294test.csv")" works without any problem.


 I am using spark-shell with the master in local mode. My table has 112 fields, and the SQL is " cc.sql("LOAD DATA local inpath 'file:///root/294test.csv' into table  carbon.test2 options('FILEHEADER'='"+r2.trim().replace(" ","")+"')")", where r2 is the csv header. If I upload the csv file to HDFS, it can be loaded successfully.


log:
INFO  02-08 14:41:46,971 - main Property file path: /root/../../../conf/carbon.properties
INFO  02-08 14:41:46,971 - main ------Using Carbon.properties --------
INFO  02-08 14:41:46,972 - main {}
INFO  02-08 14:41:46,972 - main Query [LOAD DATA LOCAL INPATH 'FILE:///ROOT/294TEST.CSV' INTO TABLE  CARBON.TEST2 OPTIONS('FILEHEADER'='LENGTH,CITY,INTERFACE,XDR_ID,RAT,IMSI,IMEI,MSISDN,MACHINE_IP_ADD_TYPE,SGW_GGSN_IP_ADD,ENB_SGSN_IP_ADD,SGW_GGSN_PORT,ENB_SGSN_PORT,ENB_SGSN_GTP_TEID,SGW_GGSN_GTP_TEID,TAC,CELL_ID,APN,APP_TYPE_CODE,PROCEDURE_STARTTIME,PROCEDURE_ENDTIME,PROTOCOL_TYPE,APP_TYPE,APP_SUB_TYPE,APP_CONTENT,APP_STATUS,USER_IPV4,USER_IPV6,USER_PORT,L4_PROTOCAL,APP_SERVER_IP_IPV4,APP_SERVER_IP_IPV6,APP_SERVER_PORT,UL_DATA,DL_DATA,UL_IP_PACKET,DL_IP_PACKET,UP_TCP_DISORDER_NUM,DOWN_TCP_DISORDER_NUM,UP_TCP_RETRANS_NUM,DOWN_TCP_RETRANS_NUM,TCP_CREACTLINK_RESPONSE_DELAY,TCP_CREACTLINK_CONFIRM_DELAY,UL_IP_FRAG_PACKETS,DL_IP_FRAG_PACKETS,TCP_1STREQUEST_DELAY,TCP_1STREUEST_RESPONSE_DELAY,TCP_WINDOW_SIZE,TCP_MSS_SIZE,TCP_CREATELINK_TRYTIMES,TCP_LINK_STATUS,SESSION_FINISH_INDICATOR,ULRTTTIMES,DLRTTTIMES,ULRTTTOTALDELAY,DLRTTTOTALDELAY,ULZEROWINDOWTIMES,DLZEROWINDOWTIMES,ULZEROWINDOWTOTALTIME,DLZEROWINDOWTOTALTIME,SESSIONRESETINDICATION,SESSIONRESETDIRECTION,INTSID1,INTSID2,INTAPPID,INTKEYWORDTYPEID,VCKEYWORD,INTWRULEID,INTARULEID,INTKRULEID,UP_TCP_LOST_NUM,DOWN_TCP_LOST_NUM,VERSION,TRANSACTION_TYPE,HTTP_WAP_AFFAIR_STATUS,HTTP_1STACK_1STREQ_DELAY,HTTP_LASTPACKET_1STREQ_DELAY,HTTP_LASTACK_DELAY,HOST,URI,XONLINEHOST,USER_AGENT,HTTP_CONTENT_TYPE,REFER_URI,COOKIE,CONTENT_LENGTH,TARGET_BEHAVIOR,WTP_INTERRUPT_TYPE,WTP_INTERRUPT_CAUSE,TITLE,KEYWORD,BUSS_BEHAVIOR_FLAG,BUSS_FINISH_FLAG,BUSS_DELAY,BUSS_BROWSER,PORTAL_APP_SET,IMUSERNAME,IMSOFTWARE_VERSION,IMOPERATION_TYPE,IMCLIENT_TYPE,IMDELAY,IMACTION,IMRESULT,RELOCATIONURI,BUFFERTIME,ESTPLAYTIME,REALPLAYTIME,CODERATE,VIDEOSIZE,STREAMIND,ISRATEAVAIL,DL_RATE_DATA')]
INFO  02-08 14:41:48,434 - Successfully able to get the table metadata file lock
INFO  02-08 14:41:48,505 - main Initiating Direct Load for the Table : (carbon.test2)
INFO  02-08 14:41:48,575 - [Block Distribution]
INFO  02-08 14:41:48,576 - totalInputSpaceConsumed : 294033120 , defaultParallelism : 60
INFO  02-08 14:41:48,577 - mapreduce.input.fileinputformat.split.maxsize : 16777216
INFO  02-08 14:41:48,917 - Block broadcast_0 stored as values in memory (estimated size 258.4 KB, free 258.4 KB)
INFO  02-08 14:41:49,323 - Block broadcast_0_piece0 stored as bytes in memory (estimated size 22.8 KB, free 281.2 KB)
INFO  02-08 14:41:49,326 - Added broadcast_0_piece0 in memory on localhost:51768 (size: 22.8 KB, free: 4.1 GB)
INFO  02-08 14:41:49,331 - Created broadcast 0 from NewHadoopRDD at CarbonTextFile.scala:45
ERROR 02-08 14:41:49,418 - generate global dictionary failed
ERROR 02-08 14:41:49,419 - main 
org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: /root/294test.csv
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:321)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:264)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:385)
	at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:120)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1307)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
	at org.apache.spark.rdd.RDD.take(RDD.scala:1302)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine$lzycompute(CarbonCsvRelation.scala:175)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine(CarbonCsvRelation.scala:170)
	at com.databricks.spark.csv.CarbonCsvRelation.inferSchema(CarbonCsvRelation.scala:141)
	at com.databricks.spark.csv.CarbonCsvRelation.<init>(CarbonCsvRelation.scala:71)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:142)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:44)
	at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:158)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:119)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:109)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.loadDataFrame(GlobalDictionaryUtil.scala:375)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.generateGlobalDictionary(GlobalDictionaryUtil.scala:462)
	at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:1149)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
	at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130)
	at org.carbondata.spark.rdd.CarbonDataFrameRDD.<init>(CarbonDataFrameRDD.scala:23)
	at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:130)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:41)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:46)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:54)
	at $line35.$read$$iwC$$iwC$$iwC$$iwC.<init>(<console>:56)
	at $line35.$read$$iwC$$iwC$$iwC.<init>(<console>:58)
	at $line35.$read$$iwC$$iwC.<init>(<console>:60)
	at $line35.$read$$iwC.<init>(<console>:62)
	at $line35.$read.<init>(<console>:64)
	at $line35.$read$.<init>(<console>:68)
	at $line35.$read$.<clinit>(<console>)
	at $line35.$eval$.<init>(<console>:7)
	at $line35.$eval$.<clinit>(<console>)
	at $line35.$eval.$print(<console>)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
	at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
	at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
	at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
	at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
	at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
	at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
	at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
	at org.apache.spark.repl.Main$.main(Main.scala:31)
	at org.apache.spark.repl.Main.main(Main.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
	at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
	at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
AUDIT 02-08 14:41:49,422 - [master][root][Thread-1]Dataload failure for carbon.test2. Please check the logs
INFO  02-08 14:41:49,424 - Table MetaData Unlocked Successfully after data load
org.apache.hadoop.mapreduce.lib.input.InvalidInputException: Input path does not exist: /root/294test.csv
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:321)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:264)
	at org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getSplits(FileInputFormat.java:385)
	at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:120)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
	at scala.Option.getOrElse(Option.scala:120)
	at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
	at org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1307)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
	at org.apache.spark.rdd.RDD.take(RDD.scala:1302)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine$lzycompute(CarbonCsvRelation.scala:175)
	at com.databricks.spark.csv.CarbonCsvRelation.firstLine(CarbonCsvRelation.scala:170)
	at com.databricks.spark.csv.CarbonCsvRelation.inferSchema(CarbonCsvRelation.scala:141)
	at com.databricks.spark.csv.CarbonCsvRelation.<init>(CarbonCsvRelation.scala:71)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:142)
	at com.databricks.spark.csv.newapi.DefaultSource.createRelation(DefaultSource.scala:44)
	at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:158)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:119)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:109)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.loadDataFrame(GlobalDictionaryUtil.scala:375)
	at org.carbondata.spark.util.GlobalDictionaryUtil$.generateGlobalDictionary(GlobalDictionaryUtil.scala:462)
	at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:1149)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
	at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
	at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145)
	at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130)
	at org.carbondata.spark.rdd.CarbonDataFrameRDD.<init>(CarbonDataFrameRDD.scala:23)
	at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:130)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:41)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:46)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
	at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:54)
	at $iwC$$iwC$$iwC$$iwC.<init>(<console>:56)
	at $iwC$$iwC$$iwC.<init>(<console>:58)
	at $iwC$$iwC.<init>(<console>:60)
	at $iwC.<init>(<console>:62)
	at <init>(<console>:64)
	at .<init>(<console>:68)
	at .<clinit>(<console>)
	at .<init>(<console>:7)
	at .<clinit>(<console>)
	at $print(<console>)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
	at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
	at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
	at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
	at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
	at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
	at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
	at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
	at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
	at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
	at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
	at org.apache.spark.repl.Main$.main(Main.scala:31)
	at org.apache.spark.repl.Main.main(Main.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
	at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
	at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)



 thanks a lot!