Posted to dev@sedona.apache.org by "Jia Yu (Jira)" <ji...@apache.org> on 2022/07/29 03:00:00 UTC

[jira] [Commented] (SEDONA-130) Error while testing the Sedona VIZ example

    [ https://issues.apache.org/jira/browse/SEDONA-130?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17572724#comment-17572724 ] 

Jia Yu commented on SEDONA-130:
-------------------------------

[~imperio] Can you try the examples here? https://github.com/apache/incubator-sedona/blob/master/examples/viz/src/main/scala/ScalaExample.scala#L104
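
For reference, that file exercises the RDD-based Viz API (HeatMap and friends) rather than the SQL functions used below. A rough sketch of its heat-map flow, assuming the 1.2.0-incubating class and package names (the exact file may differ):

{code:java}
import org.apache.spark.api.java.JavaSparkContext
import org.apache.sedona.core.enums.FileDataSplitter
import org.apache.sedona.core.spatialRDD.PointRDD
import org.apache.sedona.viz.core.ImageGenerator
import org.apache.sedona.viz.extension.visualizationEffect.HeatMap
import org.apache.sedona.viz.utils.ImageType
import org.locationtech.jts.geom.Envelope

// Load the check-in points straight into a SpatialRDD (longitude/latitude start at column 0).
val jsc = new JavaSparkContext(sparkSession.sparkContext)
val pointRDD = new PointRDD(jsc, "src/test/doc/checkin.csv", 0, FileDataSplitter.CSV, false)

// Render a 256x256 heat map over a fixed boundary instead of the SQL ST_Pixelize/ST_Render pipeline.
val boundary = new Envelope(-88.5, -88.0, 32.0, 32.5)
val heatMap = new HeatMap(256, 256, boundary, false, 2)
heatMap.Visualize(jsc, pointRDD)

// Save the rendered raster, mirroring what the SQL example does with the ST_Render output.
new ImageGenerator().SaveRasterImageAsLocalFile(heatMap.rasterImage, "src/test/doc/output/heatmap", ImageType.PNG)
{code}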

> Error while testing the Sedona VIZ example
> ------------------------------------------
>
>                 Key: SEDONA-130
>                 URL: https://issues.apache.org/jira/browse/SEDONA-130
>             Project: Apache Sedona
>          Issue Type: Bug
>            Reporter: wxmimperio
>            Priority: Major
>         Attachments: image-2022-06-29-17-52-35-049.png
>
>
> I'm trying to run the Sedona Spark Visualization tutorial code. Example link: [https://sedona.apache.org/tutorial/viz/]
> *sedona version:  sedona-xxx-3.0_2.12  1.2.0-incubating*
> *spark version: 3.1.3*
> checkin.csv
> {code:java}
> -88.331492,32.324142,hotel
> -88.175933,32.360763,gas
> -88.388954,32.357073,bar
> -88.221102,32.35078,restaurant {code}
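> The full test code below also assumes the session setup from the tutorial, i.e. a SparkSession with both the Sedona SQL and Sedona Viz extensions registered. A minimal sketch of that setup (package paths per 1.2.0-incubating, worth double-checking against the tutorial; the builder options are illustrative, not the exact test configuration):
> {code:java}
> import org.apache.spark.serializer.KryoSerializer
> import org.apache.spark.sql.SparkSession
> import org.apache.sedona.sql.utils.SedonaSQLRegistrator
> import org.apache.sedona.viz.core.Serde.SedonaVizKryoRegistrator
> import org.apache.sedona.viz.sql.utils.SedonaVizRegistrator
> // Classes used at the end of the test to save the rendered image.
> import org.apache.sedona.viz.core.{ImageGenerator, ImageSerializableWrapper}
> import org.apache.sedona.viz.utils.ImageType
>
> val sparkSession = SparkSession.builder()
>   .master("local[*]")
>   .appName("sedona-viz-test")
>   // The Viz aggregates (ST_Render etc.) expect Kryo with the Viz registrator.
>   .config("spark.serializer", classOf[KryoSerializer].getName)
>   .config("spark.kryo.registrator", classOf[SedonaVizKryoRegistrator].getName)
>   .getOrCreate()
>
> // Registers ST_Point/ST_Transform (SQL) and ST_Pixelize/ST_Colorize/ST_Render (Viz).
> SedonaSQLRegistrator.registerAll(sparkSession)
> SedonaVizRegistrator.registerAll(sparkSession)
> {code}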
> Full test code:
> {code:java}
> describe("visualize") {
>     val pointRDDInputLocation = "src/test/doc/checkin.csv"
>     val df = sparkSession.read.format("csv").option("header", "false").load(pointRDDInputLocation)
>     df.createOrReplaceTempView("source")
>     df.printSchema()
>     df.show()
>     sparkSession.sql(
>       """
>         |CREATE OR REPLACE TEMP VIEW pointtable AS
>         |SELECT ST_Point(cast(source._c0 as Decimal(24,20)),cast(source._c1 as Decimal(24,20))) as shape
>         |FROM source
>         """.stripMargin
>     )
>     sparkSession.sql(
>       """CREATE OR REPLACE TEMP VIEW boundtable AS
>         |SELECT ST_Envelope_Aggr(shape) as bound FROM pointtable""".stripMargin
>     )
>     sparkSession.sql(
>       """
>         |CREATE OR REPLACE TEMP VIEW pixels AS
>         |SELECT pixel, shape FROM pointtable
>         |LATERAL VIEW explode(ST_Pixelize(ST_Transform(shape, 'epsg:4326','epsg:3857'), 256, 256, (SELECT ST_Transform(bound, 'epsg:4326','epsg:3857') FROM boundtable))) AS pixel
>         |""".stripMargin
>     )
>     sparkSession.sql(
>       """
>         |CREATE OR REPLACE TEMP VIEW pixelaggregates AS
>         |SELECT pixel, count(*) as weight
>         |FROM pixels
>         |GROUP BY pixel
>         |""".stripMargin
>     )
>     sparkSession.sql(
>       """
>         |CREATE OR REPLACE TEMP VIEW pixelColor AS
>         |SELECT pixel, ST_Colorize(weight, (SELECT max(weight) FROM pixelaggregates)) as color
>         |FROM pixelaggregates
>         |""".stripMargin
>     )
>     sparkSession.sql(
>       """
>         |CREATE OR REPLACE TEMP VIEW images AS
>         |SELECT ST_Render(pixel, color) AS image, (SELECT ST_AsText(bound) FROM boundtable) AS boundary
>         |FROM pixelColor
>         |""".stripMargin
>     )
>     sparkSession.sql("select * from images").show()
>     val image = sparkSession.table("images").take(1)(0)(0).asInstanceOf[ImageSerializableWrapper].getImage
>     val imageGenerator = new ImageGenerator
>     imageGenerator.SaveRasterImageAsLocalFile(image, "src/test/doc/output/points", ImageType.PNG)
> } {code}
> Get Error:
> {code:java}
> java.lang.AssertionError: assertion failed
>     at scala.Predef$.assert(Predef.scala:204)
>     at org.apache.spark.sql.sedona_viz.expressions.ST_Render.merge(Render.scala:98)
>     at org.apache.spark.sql.execution.aggregate.ScalaUDAF.merge(udaf.scala:440)
>     at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$1.$anonfun$applyOrElse$3(AggregationIterator.scala:199)
>     at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$1.$anonfun$applyOrElse$3$adapted(AggregationIterator.scala:199)
>     at org.apache.spark.sql.execution.aggregate.AggregationIterator.$anonfun$generateProcessRow$7(AggregationIterator.scala:213)
>     at org.apache.spark.sql.execution.aggregate.AggregationIterator.$anonfun$generateProcessRow$7$adapted(AggregationIterator.scala:207)
>     at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.processCurrentSortedGroup(SortBasedAggregationIterator.scala:114)
>     at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:149)
>     at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:30)
>     at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:346)
>     at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
>     at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
>     at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>     at org.apache.spark.scheduler.Task.run(Task.scala:131)
>     at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
>     at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>     at java.lang.Thread.run(Thread.java:748) {code}
> !image-2022-06-29-17-52-35-049.png!



--
This message was sent by Atlassian Jira
(v8.20.10#820010)