Posted to issues@spark.apache.org by "SHAILENDRA SHAHANE (JIRA)" <ji...@apache.org> on 2018/06/11 06:11:00 UTC

[jira] [Commented] (SPARK-24514) Exception while converting RDD to DataFrame

    [ https://issues.apache.org/jira/browse/SPARK-24514?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16507712#comment-16507712 ] 

SHAILENDRA SHAHANE commented on SPARK-24514:
--------------------------------------------

------Exception Stack Trace------

18/06/11 11:37:17 ERROR Executor: Exception in task 0.0 in stage 0.0 (TID 0)
org.apache.spark.sql.AnalysisException: Decimal scale (2) cannot be greater than precision (1).;
 at org.apache.spark.sql.types.DecimalType.<init>(DecimalType.scala:46)
 at org.apache.spark.sql.types.DecimalType$.apply(DecimalType.scala:43)
 at org.apache.spark.sql.types.DataTypes.createDecimalType(DataTypes.java:123)
 at com.mongodb.spark.sql.MongoInferSchema$.com$mongodb$spark$sql$MongoInferSchema$$getDataType(MongoInferSchema.scala:248)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument$1.apply(MongoInferSchema.scala:114)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument$1.apply(MongoInferSchema.scala:114)
 at scala.collection.Iterator$class.foreach(Iterator.scala:893)
 at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
 at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
 at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
 at com.mongodb.spark.sql.MongoInferSchema$.com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument(MongoInferSchema.scala:114)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$2.apply(MongoInferSchema.scala:78)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$2.apply(MongoInferSchema.scala:78)
 at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
 at scala.collection.Iterator$class.foreach(Iterator.scala:893)
 at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
 at scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
 at scala.collection.AbstractIterator.foldLeft(Iterator.scala:1336)
 at scala.collection.TraversableOnce$class.aggregate(TraversableOnce.scala:214)
 at scala.collection.AbstractIterator.aggregate(Iterator.scala:1336)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1139)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1139)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1140)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1140)
 at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:800)
 at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:800)
 at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
 at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
 at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
 at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
 at org.apache.spark.scheduler.Task.run(Task.scala:109)
 at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
 at java.lang.Thread.run(Thread.java:748)
18/06/11 11:37:17 ERROR TaskSetManager: Task 0 in stage 0.0 failed 1 times; aborting job
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0, localhost, executor driver): org.apache.spark.sql.AnalysisException: Decimal scale (2) cannot be greater than precision (1).;
 at org.apache.spark.sql.types.DecimalType.<init>(DecimalType.scala:46)
 at org.apache.spark.sql.types.DecimalType$.apply(DecimalType.scala:43)
 at org.apache.spark.sql.types.DataTypes.createDecimalType(DataTypes.java:123)
 at com.mongodb.spark.sql.MongoInferSchema$.com$mongodb$spark$sql$MongoInferSchema$$getDataType(MongoInferSchema.scala:248)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument$1.apply(MongoInferSchema.scala:114)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument$1.apply(MongoInferSchema.scala:114)
 at scala.collection.Iterator$class.foreach(Iterator.scala:893)
 at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
 at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
 at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
 at com.mongodb.spark.sql.MongoInferSchema$.com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument(MongoInferSchema.scala:114)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$2.apply(MongoInferSchema.scala:78)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$2.apply(MongoInferSchema.scala:78)
 at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
 at scala.collection.Iterator$class.foreach(Iterator.scala:893)
 at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
 at scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
 at scala.collection.AbstractIterator.foldLeft(Iterator.scala:1336)
 at scala.collection.TraversableOnce$class.aggregate(TraversableOnce.scala:214)
 at scala.collection.AbstractIterator.aggregate(Iterator.scala:1336)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1139)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1139)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1140)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1140)
 at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:800)
 at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:800)
 at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
 at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
 at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
 at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
 at org.apache.spark.scheduler.Task.run(Task.scala:109)
 at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
 at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
 at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1599)
 at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1587)
 at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1586)
 at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
 at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
 at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1586)
 at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
 at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
 at scala.Option.foreach(Option.scala:257)
 at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
 at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1820)
 at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1769)
 at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1758)
 at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
 at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
 at org.apache.spark.SparkContext.runJob(SparkContext.scala:2027)
 at org.apache.spark.SparkContext.runJob(SparkContext.scala:2124)
 at org.apache.spark.rdd.RDD$$anonfun$fold$1.apply(RDD.scala:1092)
 at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
 at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
 at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
 at org.apache.spark.rdd.RDD.fold(RDD.scala:1086)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1.apply(RDD.scala:1155)
 at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
 at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
 at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
 at org.apache.spark.rdd.RDD.treeAggregate(RDD.scala:1131)
 at com.mongodb.spark.sql.MongoInferSchema$.apply(MongoInferSchema.scala:78)
 at com.mongodb.spark.sql.DefaultSource.createRelation(DefaultSource.scala:75)
 at com.mongodb.spark.sql.DefaultSource.createRelation(DefaultSource.scala:50)
 at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:340)
 at org.apache.spark.sql.DataFrameReader.loadV1Source(DataFrameReader.scala:239)
 at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:227)
 at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:164)
 at p1.VP2.main(VP2.java:27)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:498)
 at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
 at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:879)
 at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:197)
 at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:227)
 at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:136)
 at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: org.apache.spark.sql.AnalysisException: Decimal scale (2) cannot be greater than precision (1).;
 at org.apache.spark.sql.types.DecimalType.<init>(DecimalType.scala:46)
 at org.apache.spark.sql.types.DecimalType$.apply(DecimalType.scala:43)
 at org.apache.spark.sql.types.DataTypes.createDecimalType(DataTypes.java:123)
 at com.mongodb.spark.sql.MongoInferSchema$.com$mongodb$spark$sql$MongoInferSchema$$getDataType(MongoInferSchema.scala:248)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument$1.apply(MongoInferSchema.scala:114)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument$1.apply(MongoInferSchema.scala:114)
 at scala.collection.Iterator$class.foreach(Iterator.scala:893)
 at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
 at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
 at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
 at com.mongodb.spark.sql.MongoInferSchema$.com$mongodb$spark$sql$MongoInferSchema$$getSchemaFromDocument(MongoInferSchema.scala:114)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$2.apply(MongoInferSchema.scala:78)
 at com.mongodb.spark.sql.MongoInferSchema$$anonfun$2.apply(MongoInferSchema.scala:78)
 at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
 at scala.collection.Iterator$class.foreach(Iterator.scala:893)
 at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
 at scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
 at scala.collection.AbstractIterator.foldLeft(Iterator.scala:1336)
 at scala.collection.TraversableOnce$class.aggregate(TraversableOnce.scala:214)
 at scala.collection.AbstractIterator.aggregate(Iterator.scala:1336)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1139)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1139)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1140)
 at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1140)
 at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:800)
 at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:800)
 at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
 at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
 at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
 at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
 at org.apache.spark.scheduler.Task.run(Task.scala:109)
 at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
 at java.lang.Thread.run(Thread.java:748)
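
------Probable Root Cause------

The trigger appears to be the RSEG_BNKAN field in the sample document, whose value is {"$numberDecimal":"0.00"}. java.math.BigDecimal treats a zero as having a single significant digit, so new BigDecimal("0.00") reports precision() == 1 but scale() == 2. The trace shows MongoInferSchema calling DataTypes.createDecimalType from its getDataType path (MongoInferSchema.scala:248); if it forwards the BigDecimal's own precision and scale, a 0.00 value yields exactly DecimalType(1, 2), which Spark rejects because scale may not exceed precision. A minimal sketch of that suspected failure mode, runnable on its own (the class name is illustrative):

import java.math.BigDecimal;
import org.apache.spark.sql.types.DataTypes;

public class DecimalScaleRepro {
    public static void main(String[] args) {
        // Mirrors "RSEG_BNKAN": {"$numberDecimal": "0.00"} from the sample document.
        BigDecimal value = new BigDecimal("0.00");
        System.out.println(value.precision()); // 1 -- a zero carries one significant digit
        System.out.println(value.scale());     // 2
        // Throws org.apache.spark.sql.AnalysisException:
        // "Decimal scale (2) cannot be greater than precision (1).;"
        DataTypes.createDecimalType(value.precision(), value.scale());
    }
}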

> Exception while converting RDD to DataFrame
> -------------------------------------------
>
>                 Key: SPARK-24514
>                 URL: https://issues.apache.org/jira/browse/SPARK-24514
>             Project: Spark
>          Issue Type: Bug
>          Components: Java API, Spark Core, SQL
>    Affects Versions: 2.3.0
>         Environment: Spark version 2.3
> JDK 1.8
> MongoDB Version - 3.4 and 3.6
> Sample Data -
> {"_id":"5b0d31f892549e10b61d962a","RSEG_MANDT":"800","RSEG_EBELN":"4500017749","RSEG_EBELP":"00020","RSEG_BELNR":"1000000001","RSEG_BUZEI":"000002","RSEG_GJAHR":"2013","RBKP_BUDAT":"2013-10-04","RSEG_MENGE":{"$numberDecimal":"30.000"},"RSEG_LFBNR":"5000000472","RSEG_LFGJA":"2013","RSEG_LFPOS":"0002","NOT_ACCOUNT_MAINTENANCE":{"$numberDecimal":"1.0000000000"},"RBKP_CPUTIMESTAMP":"2013-10-04T10:32:02.000Z","RBKP_WAERS":"USD","RSEG_BNKAN":{"$numberDecimal":"0.00"},"RSEG_WRBTR":{"$numberDecimal":"2340.00"},"RSEG_SHKZG":"S"}
>            Reporter: SHAILENDRA SHAHANE
>            Priority: Major
>
> I tried to fetch data from MongoDB and got the following exception while converting the RDD to a DataFrame.
> -----------------Code --------------
> SparkSession spark = SparkSession.builder()
>     .appName("VendorProcessor")
>     .config("spark.mongodb.input.uri", "myMongoUrl")
>     .config("spark.mongodb.output.uri", "myMongoUrl")
>     .getOrCreate();
> JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
> JavaMongoRDD<Document> rdd = MongoSpark.load(jsc);
> Dataset<Row> rbkp = rdd.toDF();
> ------------------ OR ------------------------
> DataFrameReader dfr = spark.read()
>     .format("com.mongodb.spark.sql")
>     .option("floatAsBigDecimal", "true");
> Dataset<Row> rbkp = dfr.load();
> --------------------
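
------Possible Workaround (sketch)------

One way to sidestep the failure in the code quoted above is to skip schema inference altogether by handing the reader an explicit schema, so MongoInferSchema never has to map the 0.00 value. A hedged sketch, reusing the spark session from the first snippet; the field list and the decimal(10, 3) widths are illustrative assumptions, not a confirmed fix:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

// Declare only the fields needed; pick decimal widths wide enough for the data.
StructType schema = new StructType()
    .add("_id", DataTypes.StringType)
    .add("RSEG_WRBTR", DataTypes.createDecimalType(10, 3))
    .add("RSEG_BNKAN", DataTypes.createDecimalType(10, 3));

// With a user-supplied schema the connector should not need to sample
// documents to infer types, so the DecimalType(1, 2) path is never taken.
Dataset<Row> rbkp = spark.read()
    .format("com.mongodb.spark.sql")
    .schema(schema)
    .load();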


