Posted to dev@hudi.apache.org by Sivabalan <n....@gmail.com> on 2020/01/13 18:03:10 UTC

Right Dependency for spark and scala in latest master

Hey folks,
   I am running into a Scala dependency issue with the latest master while
trying to run the Quick Start. Can someone help me figure out the right
dependency?

I see that with Udit's latest PR, we have to specify an explicit package for
spark-avro. I tried with spark-avro_2.11:2.4.4.
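
For reference, the spark-shell invocation used for this attempt (the same
command is repeated further down the thread) was:

./bin/spark-shell --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
/Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar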

scala> df.write.format("org.apache.hudi").
     |     options(getQuickstartWriteConfigs).
     |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
     |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
     |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
     |     option(TABLE_NAME, tableName).
     |     mode(Overwrite).
     |     save(basePath);
java.util.ServiceConfigurationError:
org.apache.spark.sql.sources.DataSourceRegister: Provider
org.apache.spark.sql.avro.AvroFileFormat could not be instantiated
  at java.util.ServiceLoader.fail(ServiceLoader.java:232)
  at java.util.ServiceLoader.access$100(ServiceLoader.java:185)
  at
java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:384)
  at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
  at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
  at
scala.collection.convert.Wrappers$JIteratorWrapper.next(Wrappers.scala:44)
  at scala.collection.Iterator.foreach(Iterator.scala:941)
  at scala.collection.Iterator.foreach$(Iterator.scala:941)
  at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
  at scala.collection.IterableLike.foreach(IterableLike.scala:74)
  at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
  at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
  at scala.collection.TraversableLike.filterImpl(TraversableLike.scala:255)
  at scala.collection.TraversableLike.filterImpl$(TraversableLike.scala:249)
  at scala.collection.AbstractTraversable.filterImpl(Traversable.scala:108)
  at scala.collection.TraversableLike.filter(TraversableLike.scala:347)
  at scala.collection.TraversableLike.filter$(TraversableLike.scala:347)
  at scala.collection.AbstractTraversable.filter(Traversable.scala:108)
  at
org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSource(DataSource.scala:644)
  at
org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSourceV2(DataSource.scala:728)
  at
org.apache.spark.sql.DataFrameWriter.lookupV2Provider(DataFrameWriter.scala:832)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:252)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
  ... 66 elided
Caused by: java.lang.NoClassDefFoundError:
org/apache/spark/sql/execution/datasources/FileFormat$class
  at
org.apache.spark.sql.avro.AvroFileFormat.<init>(AvroFileFormat.scala:44)
  at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
  at
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
  at
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
  at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
  at java.lang.Class.newInstance(Class.java:442)
  at
java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
  ... 86 more
Caused by: java.lang.ClassNotFoundException:
org.apache.spark.sql.execution.datasources.FileFormat$class
  at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
  at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
  at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
  ... 93 more


So I tried with 2.12 instead.

./bin/spark-shell --packages org.apache.spark:spark-avro_2.12:2.4.4 --conf
'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
/Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar

scala> df.write.format("org.apache.hudi").
     |     options(getQuickstartWriteConfigs).
     |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
     |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
     |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
     |     option(TABLE_NAME, tableName).
     |     mode(Overwrite).
     |     save(basePath);
20/01/13 11:42:45 ERROR Executor: Exception in task 0.0 in stage 1.0 (TID 2)
java.lang.NoSuchMethodError:
scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
at
org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:127)
at
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
20/01/13 11:42:46 WARN TaskSetManager: Lost task 0.0 in stage 1.0 (TID 2,
192.168.1.209, executor driver): java.lang.NoSuchMethodError:
scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
at
org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:127)
at
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)

20/01/13 11:42:46 ERROR TaskSetManager: Task 0 in stage 1.0 failed 1 times;
aborting job
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0
in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage
1.0 (TID 2, 192.168.1.209, executor driver): java.lang.NoSuchMethodError:
scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
at
org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:127)
at
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
  at
org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
  at
org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1977)
  at
org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1976)
  at
scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
  at
scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
  at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
  at
org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1976)
  at
org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:956)
  at
org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:956)
  at scala.Option.foreach(Option.scala:407)
  at
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:956)
  at
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2206)
  at
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
  at
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
  at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
  at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
  at org.apache.spark.rdd.RDD.$anonfun$take$1(RDD.scala:1423)
  at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
  at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
  at org.apache.spark.rdd.RDD.take(RDD.scala:1396)
  at org.apache.spark.rdd.RDD.$anonfun$isEmpty$1(RDD.scala:1531)
  at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23)
  at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
  at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
  at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1531)
  at org.apache.spark.api.java.JavaRDDLike.isEmpty(JavaRDDLike.scala:544)
  at org.apache.spark.api.java.JavaRDDLike.isEmpty$(JavaRDDLike.scala:544)
  at
org.apache.spark.api.java.AbstractJavaRDDLike.isEmpty(JavaRDDLike.scala:45)
  at
org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:141)
  at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
  at
org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
  at
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
  at
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
  at
org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
  at
org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
  at
org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
  at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at
org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
  at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
  at
org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
  at
org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
  at
org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
  at
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
  at
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
  at
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
  at
org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
  at
org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
  ... 66 elided
Caused by: java.lang.NoSuchMethodError:
scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
  at
org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
  at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
  at
org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
  at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
  at
org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
  at
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
  at
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
  at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
  at org.apache.spark.scheduler.Task.run(Task.scala:127)
  at
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
  at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  at java.lang.Thread.run(Thread.java:748)

Just to unblock my work, I reverted my repo to a commit just before Udit's
PR (git checkout d9675c4ec0be3f342c30e17a4779c8319b207681) and tried running
the same.

./bin/spark-shell --packages com.databricks:spark-avro_2.11:3.2.0 --conf
'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
/Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar

// initial imports.
..
..

scala> df.write.format("org.apache.hudi").
     |     options(getQuickstartWriteConfigs).
     |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
     |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
     |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
     |     option(TABLE_NAME, tableName).
     |     mode(Overwrite).
     |     save(basePath);
java.lang.NoSuchMethodError:
scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
  at
org.apache.hudi.com.databricks.spark.avro.SchemaConverters$.convertStructToAvro(SchemaConverters.scala:118)
  at
org.apache.hudi.AvroConversionUtils$.convertStructTypeToAvroSchema(AvroConversionUtils.scala:79)
  at
org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:92)
  at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
  at
org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
  at
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
  at
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
  at
org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
  at
org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
  at
org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
  at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at
org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
  at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
  at
org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
  at
org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
  at
org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
  at
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
  at
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
  at
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
  at
org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
  at
org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
  at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)


-- 
Regards,
-Sivabalan

Re: Right Dependency for spark and scala in latest master

Posted by Vinoth Chandar <vi...@apache.org>.
Images don't work on the mailing list :). But good that it's working as
expected.
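
For anyone hitting the same errors: the combination that was eventually
confirmed to work (see Sudha's command, quoted below) is a Spark 2.4.4
spark-shell paired with the matching spark-avro artifact, along these lines:

spark-2.4.4-bin-hadoop2.7/bin/spark-shell --jars
/<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
--packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
'spark.serializer=org.apache.spark.serializer.KryoSerializer'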

On Tue, Jan 14, 2020 at 1:31 PM Sivabalan <n....@gmail.com> wrote:

> Sorry, sent before attaching screen shots.
>
>
>
> On Tue, Jan 14, 2020 at 4:26 PM Sivabalan <n....@gmail.com> wrote:
>
>> 3.x is not available under spark-avro_2.11. It is available only with
>> 2.12 and since 2.12 is not recommended, we are good. I verified that 2.4.4
>> works for me if both spark shell and packages are using 2.4.4.
>>
>>
>> On Tue, Jan 14, 2020 at 12:19 PM Vinoth Chandar <vi...@apache.org>
>> wrote:
>>
>>> Siva, can you please confirm that if you match the spark version (version
>>> of spark-shell) with the version of spark-avro, things work for both
>>> 2.4.4
>>> and 3.x? Else this is a release blocker.
>>>
>>> On Tue, Jan 14, 2020 at 6:45 AM Sivabalan <n....@gmail.com> wrote:
>>>
>>> > cool, thanks for the assistance Sudha. We have to fix the quick start
>>> docs
>>> > then accordingly.
>>> >
>>> >
>>> > On Tue, Jan 14, 2020 at 2:28 AM Bhavani Sudha <bhavanisudhas@gmail.com
>>> >
>>> > wrote:
>>> >
>>> > > Hi Siva,
>>> > >
>>> > > I was able to get past this issue by running from spark-shell( from
>>> > version
>>> > > 2.4.4) and spark-avro (org.apache.spark:spark-avro_2.11:2.4.4). This
>>> is
>>> > my
>>> > > command line for starting spark shell just for reference.
>>> > >
>>> > > spark-2.4.4-bin-hadoop2.7/bin/spark-shell --jars
>>> > >
>>> > >
>>> >
>>> /<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>>> > > --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
>>> > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>>> > >
>>> > > I think we have to match both spark-shell version and corresponding
>>> > > spark-avro version to 2.4.4. Please try this to see if this unblocks
>>> you.
>>> > >
>>> > > Thanks,
>>> > > Sudha
>>> > >
>>> > > On Mon, Jan 13, 2020 at 6:29 PM Vinoth Chandar <vi...@apache.org>
>>> > wrote:
>>> > >
>>> > > > I will triage this tonight and get back!
>>> > > >
>>> > > > On Mon, Jan 13, 2020 at 2:28 PM Sivabalan <n....@gmail.com>
>>> wrote:
>>> > > >
>>> > > > > Yes, that is what I tried. Is there any recommended version. I
>>> tried
>>> > > with
>>> > > > > 2.4.4. (My local spark from which I ran spark_shell
>>> > > > > is spark-3.0.0-preview2, guess that does not matter).
>>> > > > >
>>> > > > > ./bin/spark-shell --packages
>>> org.apache.spark:spark-avro_2.11:2.4.4
>>> > > > --conf
>>> > > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>>> --jars
>>> > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>>> > > > >
>>> > > > >
>>> > > > > On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <
>>> vinoth@apache.org>
>>> > > > wrote:
>>> > > > >
>>> > > > > > Hi Siva,
>>> > > > > >
>>> > > > > > In general, we need to match the
>>> > > > > >  spark-avro_2.11:<spark_version_you_are_running> .. With this
>>> > change,
>>> > > > we
>>> > > > > > effectively dropped support for spark versions older than 2.4.
>>> > > > > > Are you running on a older spark version?
>>> > > > > >
>>> > > > > >
>>> > > > > >
>>> > > > > > On Mon, Jan 13, 2020 at 10:03 AM Sivabalan <n.siva.b@gmail.com
>>> >
>>> > > wrote:
>>> > > > > >
>>> > > > > > > Hey folks,
>>> > > > > > >    I am running into scala dependency issue w/ latest master
>>> > while
>>> > > > > trying
>>> > > > > > > to run the Quick Start. Can someone help me out on right
>>> > > dependency.
>>> > > > > > >
>>> > > > > > > I see that with Udit's latest PR, we have to specify explicit
>>> > > > packages
>>> > > > > > for
>>> > > > > > > spark-avro. Tried with spark-avro_2.11:2.4.4.
>>> > > > > > >
>>> > > > > > > scala> df.write.format("org.apache.hudi").
>>> > > > > > >      |     options(getQuickstartWriteConfigs).
>>> > > > > > >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>>> > > > > > >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>>> > > > > > >      |     option(PARTITIONPATH_FIELD_OPT_KEY,
>>> "partitionpath").
>>> > > > > > >      |     option(TABLE_NAME, tableName).
>>> > > > > > >      |     mode(Overwrite).
>>> > > > > > >      |     save(basePath);
>>> > > > > > > java.util.ServiceConfigurationError:
>>> > > > > > > org.apache.spark.sql.sources.DataSourceRegister: Provider
>>> > > > > > > org.apache.spark.sql.avro.AvroFileFormat could not be
>>> > instantiated
>>> > > > > > >   at java.util.ServiceLoader.fail(ServiceLoader.java:232)
>>> > > > > > >   at
>>> java.util.ServiceLoader.access$100(ServiceLoader.java:185)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:384)
>>> > > > > > >   at
>>> > > > java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
>>> > > > > > >   at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> scala.collection.convert.Wrappers$JIteratorWrapper.next(Wrappers.scala:44)
>>> > > > > > >   at scala.collection.Iterator.foreach(Iterator.scala:941)
>>> > > > > > >   at scala.collection.Iterator.foreach$(Iterator.scala:941)
>>> > > > > > >   at
>>> > scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
>>> > > > > > >   at
>>> scala.collection.IterableLike.foreach(IterableLike.scala:74)
>>> > > > > > >   at
>>> > scala.collection.IterableLike.foreach$(IterableLike.scala:73)
>>> > > > > > >   at
>>> scala.collection.AbstractIterable.foreach(Iterable.scala:56)
>>> > > > > > >   at
>>> > > > > >
>>> > >
>>> scala.collection.TraversableLike.filterImpl(TraversableLike.scala:255)
>>> > > > > > >   at
>>> > > > > > >
>>> > > >
>>> scala.collection.TraversableLike.filterImpl$(TraversableLike.scala:249)
>>> > > > > > >   at
>>> > > > > >
>>> > >
>>> scala.collection.AbstractTraversable.filterImpl(Traversable.scala:108)
>>> > > > > > >   at
>>> > > > scala.collection.TraversableLike.filter(TraversableLike.scala:347)
>>> > > > > > >   at
>>> > > > >
>>> scala.collection.TraversableLike.filter$(TraversableLike.scala:347)
>>> > > > > > >   at
>>> > > > scala.collection.AbstractTraversable.filter(Traversable.scala:108)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSource(DataSource.scala:644)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSourceV2(DataSource.scala:728)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.lookupV2Provider(DataFrameWriter.scala:832)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:252)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>>> > > > > > >   ... 66 elided
>>> > > > > > > Caused by: java.lang.NoClassDefFoundError:
>>> > > > > > > org/apache/spark/sql/execution/datasources/FileFormat$class
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> org.apache.spark.sql.avro.AvroFileFormat.<init>(AvroFileFormat.scala:44)
>>> > > > > > >   at
>>> > sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native
>>> > > > > > Method)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>>> > > > > > >   at
>>> > > java.lang.reflect.Constructor.newInstance(Constructor.java:423)
>>> > > > > > >   at java.lang.Class.newInstance(Class.java:442)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
>>> > > > > > >   ... 86 more
>>> > > > > > > Caused by: java.lang.ClassNotFoundException:
>>> > > > > > > org.apache.spark.sql.execution.datasources.FileFormat$class
>>> > > > > > >   at
>>> java.net.URLClassLoader.findClass(URLClassLoader.java:382)
>>> > > > > > >   at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
>>> > > > > > >   at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
>>> > > > > > >   ... 93 more
>>> > > > > > >
>>> > > > > > >
>>> > > > > > > So, tried with 2.12.
>>> > > > > > >
>>> > > > > > > ./bin/spark-shell --packages
>>> > org.apache.spark:spark-avro_2.12:2.4.4
>>> > > > > > --conf
>>> > > > > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>>> > > --jars
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>>> > > > > > >
>>> > > > > > > scala> df.write.format("org.apache.hudi").
>>> > > > > > >      |     options(getQuickstartWriteConfigs).
>>> > > > > > >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>>> > > > > > >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>>> > > > > > >      |     option(PARTITIONPATH_FIELD_OPT_KEY,
>>> "partitionpath").
>>> > > > > > >      |     option(TABLE_NAME, tableName).
>>> > > > > > >      |     mode(Overwrite).
>>> > > > > > >      |     save(basePath);
>>> > > > > > > 20/01/13 11:42:45 ERROR Executor: Exception in task 0.0 in
>>> stage
>>> > > 1.0
>>> > > > > (TID
>>> > > > > > > 2)
>>> > > > > > > java.lang.NoSuchMethodError:
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > > at
>>> > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>>> > > > > > > at org.apache.spark.scheduler.Task.run(Task.scala:127)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>>> > > > > > > at
>>> > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>>> > > > > > > at
>>> > > > >
>>> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>>> > > > > > > at java.lang.Thread.run(Thread.java:748)
>>> > > > > > > 20/01/13 11:42:46 WARN TaskSetManager: Lost task 0.0 in
>>> stage 1.0
>>> > > > (TID
>>> > > > > 2,
>>> > > > > > > 192.168.1.209, executor driver): java.lang.NoSuchMethodError:
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > > at
>>> > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>>> > > > > > > at org.apache.spark.scheduler.Task.run(Task.scala:127)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>>> > > > > > > at
>>> > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>>> > > > > > > at
>>> > > > >
>>> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>>> > > > > > > at java.lang.Thread.run(Thread.java:748)
>>> > > > > > >
>>> > > > > > > 20/01/13 11:42:46 ERROR TaskSetManager: Task 0 in stage 1.0
>>> > failed
>>> > > 1
>>> > > > > > times;
>>> > > > > > > aborting job
>>> > > > > > > org.apache.spark.SparkException: Job aborted due to stage
>>> > failure:
>>> > > > > Task 0
>>> > > > > > > in stage 1.0 failed 1 times, most recent failure: Lost task
>>> 0.0
>>> > in
>>> > > > > stage
>>> > > > > > > 1.0 (TID 2, 192.168.1.209, executor driver):
>>> > > > > java.lang.NoSuchMethodError:
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > > at
>>> > > > > >
>>> > > >
>>> >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > > at
>>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > > at
>>> > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>>> > > > > > > at org.apache.spark.scheduler.Task.run(Task.scala:127)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>>> > > > > > > at
>>> > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>>> > > > > > > at
>>> > > > >
>>> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>>> > > > > > > at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>>> > > > > > > at java.lang.Thread.run(Thread.java:748)
>>> > > > > > >
>>> > > > > > > Driver stacktrace:
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1977)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1976)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
>>> > > > > > >   at
>>> > > > scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1976)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:956)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:956)
>>> > > > > > >   at scala.Option.foreach(Option.scala:407)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:956)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2206)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
>>> > > > > > >   at
>>> > > org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
>>> > > > > > >   at
>>> > > > > >
>>> > >
>>> org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
>>> > > > > > >   at
>>> > org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
>>> > > > > > >   at
>>> > org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
>>> > > > > > >   at
>>> > org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
>>> > > > > > >   at org.apache.spark.rdd.RDD.$anonfun$take$1(RDD.scala:1423)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>>> > > > > > >   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
>>> > > > > > >   at org.apache.spark.rdd.RDD.take(RDD.scala:1396)
>>> > > > > > >   at
>>> org.apache.spark.rdd.RDD.$anonfun$isEmpty$1(RDD.scala:1531)
>>> > > > > > >   at
>>> > > > > >
>>> > >
>>> scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>>> > > > > > >   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
>>> > > > > > >   at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1531)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.api.java.JavaRDDLike.isEmpty(JavaRDDLike.scala:544)
>>> > > > > > >   at
>>> > > > > >
>>> > org.apache.spark.api.java.JavaRDDLike.isEmpty$(JavaRDDLike.scala:544)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.api.java.AbstractJavaRDDLike.isEmpty(JavaRDDLike.scala:45)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:141)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
>>> > > > > > >   at
>>> > > > > >
>>> > org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>>> > > > > > >   ... 66 elided
>>> > > > > > > Caused by: java.lang.NoSuchMethodError:
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>>> > > > > > >   at
>>> > > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > >   at
>>> > > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > >   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > >
>>> > >
>>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>>> > > > > > >   at
>>> > > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>>> > > > > > >   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>>> > > > > > >   at
>>> > > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>>> > > > > > >   at org.apache.spark.scheduler.Task.run(Task.scala:127)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>>> > > > > > >   at
>>> > > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>>> > > > > > >   at
>>> > > > > >
>>> > org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>>> > > > > > >   at java.lang.Thread.run(Thread.java:748)
>>> > > > > > >
>>> > > > > > > Just to unblock my work, I reverted my repo to a commit just
>>> > before
>>> > > > > > Udi'ts
>>> > > > > > > PR(git checkout d9675c4ec0be3f342c30e17a4779c8319b207681) and
>>> > tried
>>> > > > > > running
>>> > > > > > > the same.
>>> > > > > > >
>>> > > > > > > ./bin/spark-shell --packages
>>> com.databricks:spark-avro_2.11:3.2.0
>>> > > > > --conf
>>> > > > > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>>> > > --jars
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>>> > > > > > >
>>> > > > > > > // initial imports.
>>> > > > > > > ..
>>> > > > > > > ..
>>> > > > > > >
>>> > > > > > > scala> df.write.format("org.apache.hudi").
>>> > > > > > >      |     options(getQuickstartWriteConfigs).
>>> > > > > > >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>>> > > > > > >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>>> > > > > > >      |     option(PARTITIONPATH_FIELD_OPT_KEY,
>>> "partitionpath").
>>> > > > > > >      |     option(TABLE_NAME, tableName).
>>> > > > > > >      |     mode(Overwrite).
>>> > > > > > >      |     save(basePath);
>>> > > > > > > java.lang.NoSuchMethodError:
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>>> > > > > > >   at
>>> > > > > > > org.apache.hudi.com
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> .databricks.spark.avro.SchemaConverters$.convertStructToAvro(SchemaConverters.scala:118)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.AvroConversionUtils$.convertStructTypeToAvroSchema(AvroConversionUtils.scala:79)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:92)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
>>> > > > > > >   at
>>> > > > > >
>>> > org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
>>> > > > > > >   at
>>> > > > > > >
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
>>> > > > > > >   at
>>> > > > >
>>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>>> > > > > > >
>>> > > > > > >
>>> > > > > > > --
>>> > > > > > > Regards,
>>> > > > > > > -Sivabalan
>>> > > > > > >
>>> > > > > >
>>> > > > >
>>> > > > >
>>> > > > > --
>>> > > > > Regards,
>>> > > > > -Sivabalan
>>> > > > >
>>> > > >
>>> > >
>>> >
>>> >
>>> > --
>>> > Regards,
>>> > -Sivabalan
>>> >
>>>
>>
>>
>> --
>> Regards,
>> -Sivabalan
>>
>
>
> --
> Regards,
> -Sivabalan
>

Re: Right Dependency for spark and scala in latest master

Posted by Sivabalan <n....@gmail.com>.
Sorry, sent before attaching screen shots.



On Tue, Jan 14, 2020 at 4:26 PM Sivabalan <n....@gmail.com> wrote:

> 3.x is not available under spark-avro_2.11. It is available only with 2.12
> and since 2.12 is not recommended, we are good. I verified that 2.4.4 works
> for me if both spark shell and packages are using 2.4.4.
>
>
> On Tue, Jan 14, 2020 at 12:19 PM Vinoth Chandar <vi...@apache.org> wrote:
>
>> Siva, can you please confirm that if you match the spark version (version
>> of spark-shell) with the version of spark-avro, things work for both 2.4.4
>> and 3.x? Else this is a release blocker.
>>
>> On Tue, Jan 14, 2020 at 6:45 AM Sivabalan <n....@gmail.com> wrote:
>>
>> > cool, thanks for the assistance Sudha. We have to fix the quick start
>> docs
>> > then accordingly.
>> >
>> >
>> > On Tue, Jan 14, 2020 at 2:28 AM Bhavani Sudha <bh...@gmail.com>
>> > wrote:
>> >
>> > > Hi Siva,
>> > >
>> > > I was able to get past this issue by running from spark-shell( from
>> > version
>> > > 2.4.4) and spark-avro (org.apache.spark:spark-avro_2.11:2.4.4). This
>> is
>> > my
>> > > command line for starting spark shell just for reference.
>> > >
>> > > spark-2.4.4-bin-hadoop2.7/bin/spark-shell --jars
>> > >
>> > >
>> >
>> /<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>> > > --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
>> > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>> > >
>> > > I think we have to match both spark-shell version and corresponding
>> > > spark-avro version to 2.4.4. Please try this to see if this unblocks
>> you.
>> > >
>> > > Thanks,
>> > > Sudha
>> > >
>> > > On Mon, Jan 13, 2020 at 6:29 PM Vinoth Chandar <vi...@apache.org>
>> > wrote:
>> > >
>> > > > I will triage this tonight and get back!
>> > > >
>> > > > On Mon, Jan 13, 2020 at 2:28 PM Sivabalan <n....@gmail.com>
>> wrote:
>> > > >
>> > > > > Yes, that is what I tried. Is there any recommended version. I
>> tried
>> > > with
>> > > > > 2.4.4. (My local spark from which I ran spark_shell
>> > > > > is spark-3.0.0-preview2, guess that does not matter).
>> > > > >
>> > > > > ./bin/spark-shell --packages
>> org.apache.spark:spark-avro_2.11:2.4.4
>> > > > --conf
>> > > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>> --jars
>> > > > >
>> > > > >
>> > > >
>> > >
>> >
>> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>> > > > >
>> > > > >
>> > > > > On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <vinoth@apache.org
>> >
>> > > > wrote:
>> > > > >
>> > > > > > Hi Siva,
>> > > > > >
>> > > > > > In general, we need to match the
>> > > > > >  spark-avro_2.11:<spark_version_you_are_running> .. With this
>> > change,
>> > > > we
>> > > > > > effectively dropped support for spark versions older than 2.4.
>> > > > > > Are you running on a older spark version?
>> > > > > >
>> > > > > >
>> > > > > >
>> > > > > > On Mon, Jan 13, 2020 at 10:03 AM Sivabalan <n....@gmail.com>
>> > > wrote:
>> > > > > >
>> > > > > > > Hey folks,
>> > > > > > >    I am running into scala dependency issue w/ latest master
>> > while
>> > > > > trying
>> > > > > > > to run the Quick Start. Can someone help me out on right
>> > > dependency.
>> > > > > > >
>> > > > > > > I see that with Udit's latest PR, we have to specify explicit
>> > > > packages
>> > > > > > for
>> > > > > > > spark-avro. Tried with spark-avro_2.11:2.4.4.
>> > > > > > >
>> > > > > > > scala> df.write.format("org.apache.hudi").
>> > > > > > >      |     options(getQuickstartWriteConfigs).
>> > > > > > >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>> > > > > > >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>> > > > > > >      |     option(PARTITIONPATH_FIELD_OPT_KEY,
>> "partitionpath").
>> > > > > > >      |     option(TABLE_NAME, tableName).
>> > > > > > >      |     mode(Overwrite).
>> > > > > > >      |     save(basePath);
>> > > > > > > java.util.ServiceConfigurationError:
>> > > > > > > org.apache.spark.sql.sources.DataSourceRegister: Provider
>> > > > > > > org.apache.spark.sql.avro.AvroFileFormat could not be
>> > instantiated
>> > > > > > >   at java.util.ServiceLoader.fail(ServiceLoader.java:232)
>> > > > > > >   at
>> java.util.ServiceLoader.access$100(ServiceLoader.java:185)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:384)
>> > > > > > >   at
>> > > > java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
>> > > > > > >   at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> scala.collection.convert.Wrappers$JIteratorWrapper.next(Wrappers.scala:44)
>> > > > > > >   at scala.collection.Iterator.foreach(Iterator.scala:941)
>> > > > > > >   at scala.collection.Iterator.foreach$(Iterator.scala:941)
>> > > > > > >   at
>> > scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
>> > > > > > >   at
>> scala.collection.IterableLike.foreach(IterableLike.scala:74)
>> > > > > > >   at
>> > scala.collection.IterableLike.foreach$(IterableLike.scala:73)
>> > > > > > >   at
>> scala.collection.AbstractIterable.foreach(Iterable.scala:56)
>> > > > > > >   at
>> > > > > >
>> > > scala.collection.TraversableLike.filterImpl(TraversableLike.scala:255)
>> > > > > > >   at
>> > > > > > >
>> > > >
>> scala.collection.TraversableLike.filterImpl$(TraversableLike.scala:249)
>> > > > > > >   at
>> > > > > >
>> > > scala.collection.AbstractTraversable.filterImpl(Traversable.scala:108)
>> > > > > > >   at
>> > > > scala.collection.TraversableLike.filter(TraversableLike.scala:347)
>> > > > > > >   at
>> > > > >
>> scala.collection.TraversableLike.filter$(TraversableLike.scala:347)
>> > > > > > >   at
>> > > > scala.collection.AbstractTraversable.filter(Traversable.scala:108)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSource(DataSource.scala:644)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSourceV2(DataSource.scala:728)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.lookupV2Provider(DataFrameWriter.scala:832)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:252)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>> > > > > > >   ... 66 elided
>> > > > > > > Caused by: java.lang.NoClassDefFoundError:
>> > > > > > > org/apache/spark/sql/execution/datasources/FileFormat$class
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> org.apache.spark.sql.avro.AvroFileFormat.<init>(AvroFileFormat.scala:44)
>> > > > > > >   at
>> > sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native
>> > > > > > Method)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>> > > > > > >   at
>> > > java.lang.reflect.Constructor.newInstance(Constructor.java:423)
>> > > > > > >   at java.lang.Class.newInstance(Class.java:442)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
>> > > > > > >   ... 86 more
>> > > > > > > Caused by: java.lang.ClassNotFoundException:
>> > > > > > > org.apache.spark.sql.execution.datasources.FileFormat$class
>> > > > > > >   at
>> java.net.URLClassLoader.findClass(URLClassLoader.java:382)
>> > > > > > >   at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
>> > > > > > >   at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
>> > > > > > >   ... 93 more
>> > > > > > >
>> > > > > > >
>> > > > > > > So, tried with 2.12.
>> > > > > > >
>> > > > > > > ./bin/spark-shell --packages org.apache.spark:spark-avro_2.12:2.4.4 --conf
>> > > > > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
>> > > > > > > /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>> > > > > > >
>> > > > > > > scala> df.write.format("org.apache.hudi").
>> > > > > > >      |     options(getQuickstartWriteConfigs).
>> > > > > > >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>> > > > > > >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>> > > > > > >      |     option(PARTITIONPATH_FIELD_OPT_KEY,
>> "partitionpath").
>> > > > > > >      |     option(TABLE_NAME, tableName).
>> > > > > > >      |     mode(Overwrite).
>> > > > > > >      |     save(basePath);
>> > > > > > > 20/01/13 11:42:45 ERROR Executor: Exception in task 0.0 in
>> stage
>> > > 1.0
>> > > > > (TID
>> > > > > > > 2)
>> > > > > > > java.lang.NoSuchMethodError:
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > > at
>> > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>> > > > > > > at org.apache.spark.scheduler.Task.run(Task.scala:127)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>> > > > > > > at
>> > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>> > > > > > > at
>> > > > >
>> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>> > > > > > > at java.lang.Thread.run(Thread.java:748)
>> > > > > > > 20/01/13 11:42:46 WARN TaskSetManager: Lost task 0.0 in stage
>> 1.0
>> > > > (TID
>> > > > > 2,
>> > > > > > > 192.168.1.209, executor driver): java.lang.NoSuchMethodError:
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > > at
>> > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>> > > > > > > at org.apache.spark.scheduler.Task.run(Task.scala:127)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>> > > > > > > at
>> > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>> > > > > > > at
>> > > > >
>> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>> > > > > > > at java.lang.Thread.run(Thread.java:748)
>> > > > > > >
>> > > > > > > 20/01/13 11:42:46 ERROR TaskSetManager: Task 0 in stage 1.0
>> > failed
>> > > 1
>> > > > > > times;
>> > > > > > > aborting job
>> > > > > > > org.apache.spark.SparkException: Job aborted due to stage
>> > failure:
>> > > > > Task 0
>> > > > > > > in stage 1.0 failed 1 times, most recent failure: Lost task
>> 0.0
>> > in
>> > > > > stage
>> > > > > > > 1.0 (TID 2, 192.168.1.209, executor driver):
>> > > > > java.lang.NoSuchMethodError:
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > > at
>> > > > > >
>> > > >
>> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > > at
>> > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > > at
>> > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>> > > > > > > at org.apache.spark.scheduler.Task.run(Task.scala:127)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>> > > > > > > at
>> > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>> > > > > > > at
>> > > > >
>> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>> > > > > > > at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>> > > > > > > at java.lang.Thread.run(Thread.java:748)
>> > > > > > >
>> > > > > > > Driver stacktrace:
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1977)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1976)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
>> > > > > > >   at
>> > > > scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1976)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:956)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:956)
>> > > > > > >   at scala.Option.foreach(Option.scala:407)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:956)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2206)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
>> > > > > > >   at
>> > > org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
>> > > > > > >   at
>> > > > > >
>> > > org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
>> > > > > > >   at
>> > org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
>> > > > > > >   at
>> > org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
>> > > > > > >   at
>> > org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
>> > > > > > >   at org.apache.spark.rdd.RDD.$anonfun$take$1(RDD.scala:1423)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>> > > > > > >   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
>> > > > > > >   at org.apache.spark.rdd.RDD.take(RDD.scala:1396)
>> > > > > > >   at
>> org.apache.spark.rdd.RDD.$anonfun$isEmpty$1(RDD.scala:1531)
>> > > > > > >   at
>> > > > > >
>> > > scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>> > > > > > >   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
>> > > > > > >   at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1531)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.api.java.JavaRDDLike.isEmpty(JavaRDDLike.scala:544)
>> > > > > > >   at
>> > > > > >
>> > org.apache.spark.api.java.JavaRDDLike.isEmpty$(JavaRDDLike.scala:544)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.api.java.AbstractJavaRDDLike.isEmpty(JavaRDDLike.scala:45)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:141)
>> > > > > > >   at
>> > > > >
>> org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
>> > > > > > >   at
>> > > > > >
>> > org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>> > > > > > >   ... 66 elided
>> > > > > > > Caused by: java.lang.NoSuchMethodError:
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>> > > > > > >   at
>> > > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > >   at
>> > > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > >   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > >   at
>> > > > > > >
>> > > > >
>> > >
>> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>> > > > > > >   at
>> > > org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>> > > > > > >   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>> > > > > > >   at
>> > > > org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>> > > > > > >   at org.apache.spark.scheduler.Task.run(Task.scala:127)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>> > > > > > >   at
>> > > > org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>> > > > > > >   at
>> > > > > >
>> > org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>> > > > > > >   at java.lang.Thread.run(Thread.java:748)
>> > > > > > >
>> > > > > > > Just to unblock my work, I reverted my repo to a commit just before
>> > > > > > > Udit's PR (git checkout d9675c4ec0be3f342c30e17a4779c8319b207681) and
>> > > > > > > tried running the same.
>> > > > > > >
>> > > > > > > ./bin/spark-shell --packages com.databricks:spark-avro_2.11:3.2.0 --conf
>> > > > > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
>> > > > > > > /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>> > > > > > >
>> > > > > > > // initial imports.
>> > > > > > > ..
>> > > > > > > ..
>> > > > > > >
>> > > > > > > scala> df.write.format("org.apache.hudi").
>> > > > > > >      |     options(getQuickstartWriteConfigs).
>> > > > > > >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>> > > > > > >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>> > > > > > >      |     option(PARTITIONPATH_FIELD_OPT_KEY,
>> "partitionpath").
>> > > > > > >      |     option(TABLE_NAME, tableName).
>> > > > > > >      |     mode(Overwrite).
>> > > > > > >      |     save(basePath);
>> > > > > > > java.lang.NoSuchMethodError:
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>> > > > > > >   at
>> > > > > > > org.apache.hudi.com
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> .databricks.spark.avro.SchemaConverters$.convertStructToAvro(SchemaConverters.scala:118)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.AvroConversionUtils$.convertStructTypeToAvroSchema(AvroConversionUtils.scala:79)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:92)
>> > > > > > >   at
>> > > > >
>> org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
>> > > > > > >   at
>> > > > > >
>> > org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
>> > > > > > >   at
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
>> > > > > > >   at
>> > > > > > >
>> > > > > > >
>> > > > > >
>> > > > >
>> > > >
>> > >
>> >
>> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
>> > > > > > >   at
>> > > > >
>> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>> > > > > > >
>> > > > > > >
>> > > > > > > --
>> > > > > > > Regards,
>> > > > > > > -Sivabalan
>> > > > > > >
>> > > > > >
>> > > > >
>> > > > >
>> > > > > --
>> > > > > Regards,
>> > > > > -Sivabalan
>> > > > >
>> > > >
>> > >
>> >
>> >
>> > --
>> > Regards,
>> > -Sivabalan
>> >
>>
>
>
> --
> Regards,
> -Sivabalan
>


-- 
Regards,
-Sivabalan

> > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
> > > > > > >   at
> > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
> > > > > > >   at
> > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
> > > > > > >   at
> > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
> > > > > > >   at
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
> > > > > > >   at
> > > > > > >
> > > > > > >
> > > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
> > > > > > >   at
> > > > >
> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
> > > > > > >   at
> > > > >
> org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
> > > > > > >
> > > > > > >
> > > > > > > --
> > > > > > > Regards,
> > > > > > > -Sivabalan
> > > > > > >
> > > > > >
> > > > >
> > > > >
> > > > > --
> > > > > Regards,
> > > > > -Sivabalan
> > > > >
> > > >
> > >
> >
> >
> > --
> > Regards,
> > -Sivabalan
> >
>


-- 
Regards,
-Sivabalan

Re: Right Dependency for spark and scala in latest master

Posted by Vinoth Chandar <vi...@apache.org>.
Siva, can you please confirm whether matching the spark version (the version
of spark-shell) with the spark-avro version makes things work for both 2.4.4
and 3.x? Otherwise this is a release blocker.
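
For reference, a minimal sketch of the two matched pairings this asks about. Only the 2.4.4 pairing is confirmed later in the thread; the Spark 3.0.0-preview2 download name and spark-avro coordinates below are assumptions about what the matching artifact would be, and the _2.11 Hudi bundle may still be incompatible with a Scala 2.12 build of Spark.

# Spark 2.4.4 (Scala 2.11) with the matching spark-avro artifact
spark-2.4.4-bin-hadoop2.7/bin/spark-shell \
  --packages org.apache.spark:spark-avro_2.11:2.4.4 \
  --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
  --jars /<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar

# Spark 3.0.0-preview2 (Scala 2.12) with what is presumably the matching artifact (untested here)
spark-3.0.0-preview2-bin-hadoop2.7/bin/spark-shell \
  --packages org.apache.spark:spark-avro_2.12:3.0.0-preview2 \
  --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
  --jars /<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar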

On Tue, Jan 14, 2020 at 6:45 AM Sivabalan <n....@gmail.com> wrote:

> cool, thanks for the assistance, Sudha. We have to fix the quick start docs
> accordingly, then.
>
>
> On Tue, Jan 14, 2020 at 2:28 AM Bhavani Sudha <bh...@gmail.com> wrote:
> >
> > Hi Siva,
> >
> > I was able to get past this issue by running from spark-shell (from version
> > 2.4.4) and spark-avro (org.apache.spark:spark-avro_2.11:2.4.4). This is my
> > command line for starting spark-shell, just for reference.
> >
> > spark-2.4.4-bin-hadoop2.7/bin/spark-shell --jars
> > /<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
> > --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
> > 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
> >
> > I think we have to match both the spark-shell version and the corresponding
> > spark-avro version to 2.4.4. Please try this to see if this unblocks you.
> >
> > Thanks,
> > Sudha
> >
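
As a quick sanity check on the setup Sudha describes above (a sketch assuming only the stock spark-shell REPL): the Spark version the shell reports should equal the version in the spark-avro package, and its Scala binary version should match the _2.11 suffix.

scala> spark.version                              // expected 2.4.4, same as spark-avro_2.11:2.4.4
scala> scala.util.Properties.versionNumberString  // expected 2.11.x, matching the _2.11 suffix
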
> > On Mon, Jan 13, 2020 at 6:29 PM Vinoth Chandar <vi...@apache.org> wrote:
> >
> > > I will triage this tonight and get back!
> > >
> > > On Mon, Jan 13, 2020 at 2:28 PM Sivabalan <n....@gmail.com> wrote:
> > >
> > > > Yes, that is what I tried. Is there any recommended version? I tried with
> > > > 2.4.4. (My local spark from which I ran spark-shell is
> > > > spark-3.0.0-preview2; I guess that does not matter.)
> > > >
> > > > ./bin/spark-shell --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
> > > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
> > > > /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
> > > >
> > > >
> > > > On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <vi...@apache.org> wrote:
> > > >
> > > > > Hi Siva,
> > > > >
> > > > > In general, we need to match the
> > > > > spark-avro_2.11:<spark_version_you_are_running>. With this change, we
> > > > > effectively dropped support for spark versions older than 2.4.
> > > > > Are you running on an older spark version?
> > > > >
> > > > >
> > > > >
> > > > > On Mon, Jan 13, 2020 at 10:03 AM Sivabalan <n....@gmail.com> wrote:
> > > > >
> > > > > > [quoted copy of the original report (spark-shell commands and full stack traces) snipped]
>
> --
> Regards,
> -Sivabalan
>

Re: Right Dependency for spark and scala in latest master

Posted by Sivabalan <n....@gmail.com>.
cool, thanks for the assistance, Sudha. We have to fix the quick start docs
accordingly, then.
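
A rough sketch of the rule the quick start would need to spell out, generalizing Vinoth's note quoted further down; <scala_binary_version> and <spark_version> are placeholders and the exact doc wording is still open:

--packages org.apache.spark:spark-avro_<scala_binary_version>:<spark_version>
# e.g., with spark-shell from Spark 2.4.4 (built with Scala 2.11), per Sudha's verified command:
--packages org.apache.spark:spark-avro_2.11:2.4.4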


On Tue, Jan 14, 2020 at 2:28 AM Bhavani Sudha <bh...@gmail.com> wrote:

> Hi Siva,
>
> I was able to get past this issue by running from spark-shell (from version
> 2.4.4) and spark-avro (org.apache.spark:spark-avro_2.11:2.4.4). This is my
> command line for starting spark-shell, just for reference.
>
> spark-2.4.4-bin-hadoop2.7/bin/spark-shell --jars
> /<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
> --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
> 'spark.serializer=org.apache.spark.serializer.KryoSerializer'
>
> I think we have to match both the spark-shell version and the corresponding
> spark-avro version to 2.4.4. Please try this to see if this unblocks you.
>
> Thanks,
> Sudha
>
> On Mon, Jan 13, 2020 at 6:29 PM Vinoth Chandar <vi...@apache.org> wrote:
>
> > I will triage this tonight and get back!
> >
> > On Mon, Jan 13, 2020 at 2:28 PM Sivabalan <n....@gmail.com> wrote:
> >
> > > Yes, that is what I tried. Is there any recommended version? I tried with
> > > 2.4.4. (My local spark from which I ran spark-shell is
> > > spark-3.0.0-preview2; I guess that does not matter.)
> > >
> > > ./bin/spark-shell --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
> > > 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
> > > /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
> > >
> > >
> > > On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <vi...@apache.org> wrote:
> > >
> > > > Hi Siva,
> > > >
> > > > In general, we need to match the
> > > > spark-avro_2.11:<spark_version_you_are_running>. With this change, we
> > > > effectively dropped support for spark versions older than 2.4.
> > > > Are you running on an older spark version?
> > > >
> > > >
> > > >
> > > > On Mon, Jan 13, 2020 at 10:03 AM Sivabalan <n....@gmail.com> wrote:
> > > >
> > > > > [quoted copy of the original report (spark-shell commands and full stack traces) snipped]
> > > > >   at
> > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
> > > > >   at
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
> > > > >   at
> > > > >
> > > > >
> > > >
> > >
> >
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
> > > > >   at
> > > org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
> > > > >   at
> > > org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
> > > > >
> > > > >
> > > > > --
> > > > > Regards,
> > > > > -Sivabalan
> > > > >
> > > >
> > >
> > >
> > > --
> > > Regards,
> > > -Sivabalan
> > >
> >
>


-- 
Regards,
-Sivabalan

Re: Right Dependency for spark and scala in latest master

Posted by Bhavani Sudha <bh...@gmail.com>.
Hi Siva,

I was able to get past this issue by running spark-shell from Spark 2.4.4
together with spark-avro (org.apache.spark:spark-avro_2.11:2.4.4). For
reference, this is the command line I use to start the spark shell:

spark-2.4.4-bin-hadoop2.7/bin/spark-shell --jars
/<path_to_hudi>/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
--packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
'spark.serializer=org.apache.spark.serializer.KryoSerializer'

I think we have to match both the spark-shell version and the corresponding
spark-avro version, in this case to 2.4.4. Please try this and see if it
unblocks you.
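
If it helps, the matching --packages coordinate can also be derived from
whatever the running shell reports. Below is just a convenience sketch (not
part of Hudi or Spark; the val names are arbitrary), assuming Spark 2.4+
where spark-avro is published under org.apache.spark:

scala> val scalaBinary = scala.util.Properties.versionNumberString.split('.').take(2).mkString(".")
scala> val sparkAvroPackage = s"org.apache.spark:spark-avro_${scalaBinary}:${spark.version}"
scala> println(sparkAvroPackage)   // e.g. org.apache.spark:spark-avro_2.11:2.4.4; pass this value to --packages

Whatever it prints is the spark-avro artifact that lines up with that shell.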

Thanks,
Sudha

On Mon, Jan 13, 2020 at 6:29 PM Vinoth Chandar <vi...@apache.org> wrote:

> I will triage this tonight and get back!
>
> On Mon, Jan 13, 2020 at 2:28 PM Sivabalan <n....@gmail.com> wrote:
>
> > Yes, that is what I tried. Is there any recommended version? I tried with
> > 2.4.4. (My local Spark, from which I ran spark-shell,
> > is spark-3.0.0-preview2; I guess that does not matter.)
> >
> > ./bin/spark-shell --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
> > 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
> > /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
> >
> >
> > On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <vi...@apache.org> wrote:
> >
> > > Hi Siva,
> > >
> > > In general, we need to match the
> > > spark-avro_2.11:<spark_version_you_are_running>. With this change, we
> > > effectively dropped support for Spark versions older than 2.4.
> > > Are you running on an older Spark version?
> > >
> >
> >
> > --
> > Regards,
> > -Sivabalan
> >
>

Re: Right Dependency for spark and scala in latest master

Posted by Vinoth Chandar <vi...@apache.org>.
I will triage this tonight and get back!

On Mon, Jan 13, 2020 at 2:28 PM Sivabalan <n....@gmail.com> wrote:

> Yes, that is what I tried. Is there any recommended version? I tried with
> 2.4.4. (My local Spark, from which I ran spark-shell,
> is spark-3.0.0-preview2; I guess that does not matter.)
>
> ./bin/spark-shell --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
> 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>
>
> On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <vi...@apache.org> wrote:
>
> > Hi Siva,
> >
> > In general, we need to match the
> > spark-avro_2.11:<spark_version_you_are_running>. With this change, we
> > effectively dropped support for Spark versions older than 2.4.
> > Are you running on an older Spark version?
> >
> >
>
>
> --
> Regards,
> -Sivabalan
>

Re: Right Dependency for spark and scala in latest master

Posted by Sivabalan <n....@gmail.com>.
Yes, that is what I tried. Is there any recommended version? I tried with
2.4.4. (My local Spark, from which I ran spark-shell,
is spark-3.0.0-preview2; I guess that does not matter.)

./bin/spark-shell --packages org.apache.spark:spark-avro_2.11:2.4.4 --conf
'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
/Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
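
A quick way to double-check what that shell is actually running, using only
what spark-shell already provides (a sanity-check sketch, not output from
this run):

scala> println(s"Spark ${spark.version}, Scala ${scala.util.Properties.versionNumberString}")

Whatever that prints is the Spark/Scala combination the spark-avro package
needs to line up with.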


On Mon, Jan 13, 2020 at 3:54 PM Vinoth Chandar <vi...@apache.org> wrote:

> Hi Siva,
>
> In general, we need to match the
> spark-avro_2.11:<spark_version_you_are_running>. With this change, we
> effectively dropped support for Spark versions older than 2.4.
> Are you running on an older Spark version?
>
> > at java.lang.Thread.run(Thread.java:748)
> >
> > 20/01/13 11:42:46 ERROR TaskSetManager: Task 0 in stage 1.0 failed 1
> times;
> > aborting job
> > org.apache.spark.SparkException: Job aborted due to stage failure: Task 0
> > in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage
> > 1.0 (TID 2, 192.168.1.209, executor driver): java.lang.NoSuchMethodError:
> >
> >
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
> > at
> >
> >
> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
> > at
> >
> >
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
> > at
> >
> >
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
> > at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
> > at
> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
> > at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> > at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> > at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> > at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> > at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> > at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> > at org.apache.spark.scheduler.Task.run(Task.scala:127)
> > at
> >
> >
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
> > at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> > at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
> > at
> >
> >
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> > at
> >
> >
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> > at java.lang.Thread.run(Thread.java:748)
> >
> > Driver stacktrace:
> >   at
> >
> >
> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1977)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1976)
> >   at
> > scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> >   at
> > scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> >   at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
> >   at
> >
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1976)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:956)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:956)
> >   at scala.Option.foreach(Option.scala:407)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:956)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2206)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
> >   at
> >
> >
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
> >   at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
> >   at
> org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
> >   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
> >   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
> >   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
> >   at org.apache.spark.rdd.RDD.$anonfun$take$1(RDD.scala:1423)
> >   at
> >
> >
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> >   at
> >
> >
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> >   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
> >   at org.apache.spark.rdd.RDD.take(RDD.scala:1396)
> >   at org.apache.spark.rdd.RDD.$anonfun$isEmpty$1(RDD.scala:1531)
> >   at
> scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23)
> >   at
> >
> >
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> >   at
> >
> >
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> >   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
> >   at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1531)
> >   at org.apache.spark.api.java.JavaRDDLike.isEmpty(JavaRDDLike.scala:544)
> >   at
> org.apache.spark.api.java.JavaRDDLike.isEmpty$(JavaRDDLike.scala:544)
> >   at
> >
> org.apache.spark.api.java.AbstractJavaRDDLike.isEmpty(JavaRDDLike.scala:45)
> >   at
> >
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:141)
> >   at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
> >   at
> >
> >
> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
> >   at
> >
> >
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
> >   at
> >
> >
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
> >   at
> >
> >
> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
> >   at
> >
> >
> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
> >   at
> >
> >
> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
> >   at
> >
> >
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> >   at
> >
> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
> >   at
> org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
> >   at
> >
> >
> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
> >   at
> >
> >
> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
> >   at
> >
> >
> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
> >   at
> >
> >
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
> >   at
> >
> >
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
> >   at
> >
> >
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
> >   at
> >
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
> >   at
> >
> >
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
> >   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
> >   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
> >   ... 66 elided
> > Caused by: java.lang.NoSuchMethodError:
> >
> >
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
> >   at
> >
> >
> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
> >   at
> >
> >
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
> >   at
> >
> >
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
> >   at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
> >   at
> > org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
> >   at
> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> >   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> >   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> >   at
> > org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> >   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> >   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> >   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> >   at org.apache.spark.scheduler.Task.run(Task.scala:127)
> >   at
> >
> >
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
> >   at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> >   at
> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
> >   at
> >
> >
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> >   at
> >
> >
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> >   at java.lang.Thread.run(Thread.java:748)
> >
> > Just to unblock my work, I reverted my repo to a commit just before
> Udi'ts
> > PR(git checkout d9675c4ec0be3f342c30e17a4779c8319b207681) and tried
> running
> > the same.
> >
> > ./bin/spark-shell --packages com.databricks:spark-avro_2.11:3.2.0 --conf
> > 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
> >
> >
> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
> >
> > // initial imports.
> > ..
> > ..
> >
> > scala> df.write.format("org.apache.hudi").
> >      |     options(getQuickstartWriteConfigs).
> >      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
> >      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
> >      |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
> >      |     option(TABLE_NAME, tableName).
> >      |     mode(Overwrite).
> >      |     save(basePath);
> > java.lang.NoSuchMethodError:
> >
> >
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
> >   at
> > org.apache.hudi.com
> >
> .databricks.spark.avro.SchemaConverters$.convertStructToAvro(SchemaConverters.scala:118)
> >   at
> >
> >
> org.apache.hudi.AvroConversionUtils$.convertStructTypeToAvroSchema(AvroConversionUtils.scala:79)
> >   at
> >
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:92)
> >   at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
> >   at
> >
> >
> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
> >   at
> >
> >
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
> >   at
> >
> >
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
> >   at
> >
> >
> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
> >   at
> >
> >
> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
> >   at
> >
> >
> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
> >   at
> >
> >
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> >   at
> >
> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
> >   at
> org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
> >   at
> >
> >
> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
> >   at
> >
> >
> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
> >   at
> >
> >
> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
> >   at
> >
> >
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
> >   at
> >
> >
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
> >   at
> >
> >
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
> >   at
> >
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
> >   at
> >
> >
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
> >   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
> >   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
> >
> >
> > --
> > Regards,
> > -Sivabalan
> >
>


-- 
Regards,
-Sivabalan

Re: Right Dependency for spark and scala in latest master

Posted by Vinoth Chandar <vi...@apache.org>.
Hi Siva,

In general, the spark-avro package needs to match the Spark version you are
running, i.e. spark-avro_2.11:<spark_version_you_are_running>. With this
change, we effectively dropped support for Spark versions older than 2.4.
Are you running an older Spark version?
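
For example, assuming you are on the Spark 2.4.4 binary distribution (which is
built against Scala 2.11), the quickstart launch would look roughly like the
following; the bundle jar path is just a placeholder for your locally built
snapshot:

# the _2.11 suffix must match the Scala version of the Spark build, and the
# spark-avro version must match the Spark version itself (2.4.4 here)
./bin/spark-shell \
  --packages org.apache.spark:spark-avro_2.11:2.4.4 \
  --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
  --jars <path-to>/hudi-spark-bundle-0.5.1-SNAPSHOT.jar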



On Mon, Jan 13, 2020 at 10:03 AM Sivabalan <n....@gmail.com> wrote:

> Hey folks,
>    I am running into scala dependency issue w/ latest master while trying
> to run the Quick Start. Can someone help me out on right dependency.
>
> I see that with Udit's latest PR, we have to specify explicit packages for
> spark-avro. Tried with spark-avro_2.11:2.4.4.
>
> scala> df.write.format("org.apache.hudi").
>      |     options(getQuickstartWriteConfigs).
>      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>      |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
>      |     option(TABLE_NAME, tableName).
>      |     mode(Overwrite).
>      |     save(basePath);
> java.util.ServiceConfigurationError:
> org.apache.spark.sql.sources.DataSourceRegister: Provider
> org.apache.spark.sql.avro.AvroFileFormat could not be instantiated
>   at java.util.ServiceLoader.fail(ServiceLoader.java:232)
>   at java.util.ServiceLoader.access$100(ServiceLoader.java:185)
>   at
> java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:384)
>   at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
>   at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
>   at
> scala.collection.convert.Wrappers$JIteratorWrapper.next(Wrappers.scala:44)
>   at scala.collection.Iterator.foreach(Iterator.scala:941)
>   at scala.collection.Iterator.foreach$(Iterator.scala:941)
>   at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
>   at scala.collection.IterableLike.foreach(IterableLike.scala:74)
>   at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
>   at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
>   at scala.collection.TraversableLike.filterImpl(TraversableLike.scala:255)
>   at
> scala.collection.TraversableLike.filterImpl$(TraversableLike.scala:249)
>   at scala.collection.AbstractTraversable.filterImpl(Traversable.scala:108)
>   at scala.collection.TraversableLike.filter(TraversableLike.scala:347)
>   at scala.collection.TraversableLike.filter$(TraversableLike.scala:347)
>   at scala.collection.AbstractTraversable.filter(Traversable.scala:108)
>   at
>
> org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSource(DataSource.scala:644)
>   at
>
> org.apache.spark.sql.execution.datasources.DataSource$.lookupDataSourceV2(DataSource.scala:728)
>   at
>
> org.apache.spark.sql.DataFrameWriter.lookupV2Provider(DataFrameWriter.scala:832)
>   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:252)
>   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>   ... 66 elided
> Caused by: java.lang.NoClassDefFoundError:
> org/apache/spark/sql/execution/datasources/FileFormat$class
>   at
> org.apache.spark.sql.avro.AvroFileFormat.<init>(AvroFileFormat.scala:44)
>   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
>   at
>
> sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
>   at
>
> sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
>   at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
>   at java.lang.Class.newInstance(Class.java:442)
>   at
> java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
>   ... 86 more
> Caused by: java.lang.ClassNotFoundException:
> org.apache.spark.sql.execution.datasources.FileFormat$class
>   at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
>   at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
>   at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
>   ... 93 more
>
>
> So, tried with 2.12.
>
> ./bin/spark-shell --packages org.apache.spark:spark-avro_2.12:2.4.4 --conf
> 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
>
> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>
> scala> df.write.format("org.apache.hudi").
>      |     options(getQuickstartWriteConfigs).
>      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>      |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
>      |     option(TABLE_NAME, tableName).
>      |     mode(Overwrite).
>      |     save(basePath);
> 20/01/13 11:42:45 ERROR Executor: Exception in task 0.0 in stage 1.0 (TID
> 2)
> java.lang.NoSuchMethodError:
>
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
> at
>
> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
> at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
> at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
> at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
> at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> at org.apache.spark.scheduler.Task.run(Task.scala:127)
> at
>
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
> at
>
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
>
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
> 20/01/13 11:42:46 WARN TaskSetManager: Lost task 0.0 in stage 1.0 (TID 2,
> 192.168.1.209, executor driver): java.lang.NoSuchMethodError:
>
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
> at
>
> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
> at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
> at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
> at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
> at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> at org.apache.spark.scheduler.Task.run(Task.scala:127)
> at
>
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
> at
>
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
>
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
>
> 20/01/13 11:42:46 ERROR TaskSetManager: Task 0 in stage 1.0 failed 1 times;
> aborting job
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 0
> in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage
> 1.0 (TID 2, 192.168.1.209, executor driver): java.lang.NoSuchMethodError:
>
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
> at
>
> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
> at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
> at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
> at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
> at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> at org.apache.spark.scheduler.Task.run(Task.scala:127)
> at
>
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
> at
>
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
>
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
>
> Driver stacktrace:
>   at
>
> org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
>   at
>
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1977)
>   at
>
> org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1976)
>   at
> scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
>   at
> scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
>   at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
>   at
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1976)
>   at
>
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:956)
>   at
>
> org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:956)
>   at scala.Option.foreach(Option.scala:407)
>   at
>
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:956)
>   at
>
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2206)
>   at
>
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
>   at
>
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
>   at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
>   at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
>   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
>   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
>   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
>   at org.apache.spark.rdd.RDD.$anonfun$take$1(RDD.scala:1423)
>   at
>
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>   at
>
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
>   at org.apache.spark.rdd.RDD.take(RDD.scala:1396)
>   at org.apache.spark.rdd.RDD.$anonfun$isEmpty$1(RDD.scala:1531)
>   at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23)
>   at
>
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>   at
>
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>   at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
>   at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1531)
>   at org.apache.spark.api.java.JavaRDDLike.isEmpty(JavaRDDLike.scala:544)
>   at org.apache.spark.api.java.JavaRDDLike.isEmpty$(JavaRDDLike.scala:544)
>   at
> org.apache.spark.api.java.AbstractJavaRDDLike.isEmpty(JavaRDDLike.scala:45)
>   at
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:141)
>   at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
>   at
>
> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
>   at
>
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>   at
>
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>   at
>
> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
>   at
>
> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
>   at
>
> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
>   at
>
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>   at
> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
>   at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
>   at
>
> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
>   at
>
> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
>   at
>
> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
>   at
>
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
>   at
>
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
>   at
>
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
>   at
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
>   at
>
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
>   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
>   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>   ... 66 elided
> Caused by: java.lang.NoSuchMethodError:
>
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>   at
>
> org.apache.hudi.AvroConversionHelper$.createConverterToAvro(AvroConversionHelper.scala:341)
>   at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:46)
>   at
>
> org.apache.hudi.AvroConversionUtils$$anonfun$2.apply(AvroConversionUtils.scala:42)
>   at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
>   at
> org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
>   at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>   at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
>   at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
>   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>   at org.apache.spark.scheduler.Task.run(Task.scala:127)
>   at
>
> org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
>   at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
>   at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
>   at
>
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>   at
>
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>   at java.lang.Thread.run(Thread.java:748)
>
> Just to unblock my work, I reverted my repo to a commit just before Udit's
> PR (git checkout d9675c4ec0be3f342c30e17a4779c8319b207681) and tried running
> the same.
>
> ./bin/spark-shell --packages com.databricks:spark-avro_2.11:3.2.0 --conf
> 'spark.serializer=org.apache.spark.serializer.KryoSerializer' --jars
>
> /Users/sivabala/Documents/personal/projects/siva_hudi/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle-0.5.1-SNAPSHOT.jar
>
> // initial imports.
> ..
> ..
>
> scala> df.write.format("org.apache.hudi").
>      |     options(getQuickstartWriteConfigs).
>      |     option(PRECOMBINE_FIELD_OPT_KEY, "ts").
>      |     option(RECORDKEY_FIELD_OPT_KEY, "uuid").
>      |     option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
>      |     option(TABLE_NAME, tableName).
>      |     mode(Overwrite).
>      |     save(basePath);
> java.lang.NoSuchMethodError:
>
> scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
>   at
> org.apache.hudi.com
> .databricks.spark.avro.SchemaConverters$.convertStructToAvro(SchemaConverters.scala:118)
>   at
>
> org.apache.hudi.AvroConversionUtils$.convertStructTypeToAvroSchema(AvroConversionUtils.scala:79)
>   at
> org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:92)
>   at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:91)
>   at
>
> org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
>   at
>
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
>   at
>
> org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
>   at
>
> org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
>   at
>
> org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:173)
>   at
>
> org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:211)
>   at
>
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>   at
> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:208)
>   at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:169)
>   at
>
> org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
>   at
>
> org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
>   at
>
> org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:828)
>   at
>
> org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
>   at
>
> org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
>   at
>
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
>   at
> org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:828)
>   at
>
> org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:309)
>   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
>   at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:236)
>
>
> --
> Regards,
> -Sivabalan
>