Posted to dev@phoenix.apache.org by "Babar Tareen (JIRA)" <ji...@apache.org> on 2015/09/23 19:11:04 UTC

[jira] [Created] (PHOENIX-2287) Spark Plugin Exception - java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.GenericMutableRow cannot be cast to org.apache.spark.sql.Row

Babar Tareen created PHOENIX-2287:
-------------------------------------

             Summary: Spark Plugin Exception - java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.GenericMutableRow cannot be cast to org.apache.spark.sql.Row
                 Key: PHOENIX-2287
                 URL: https://issues.apache.org/jira/browse/PHOENIX-2287
             Project: Phoenix
          Issue Type: Bug
    Affects Versions: 4.5.2
         Environment: - HBase 1.1.1 running in standalone mode on OS X
- Spark 1.5.0
- Phoenix 4.5.2
            Reporter: Babar Tareen


Running the DataFrame example from the Spark plugin page (https://phoenix.apache.org/phoenix_spark.html) results in the following exception. The same code works as expected with Spark 1.4.1.

{code:java}
    import org.apache.spark.SparkContext
    import org.apache.spark.sql.SQLContext
    import org.apache.phoenix.spark._

    val sc = new SparkContext("local", "phoenix-test")
    val sqlContext = new SQLContext(sc)

    // Load TABLE1 through the phoenix-spark data source.
    val df = sqlContext.load(
      "org.apache.phoenix.spark",
      Map("table" -> "TABLE1", "zkUrl" -> "127.0.0.1:2181")
    )

    // Filter and project; the exception is thrown once .show forces execution.
    df
      .filter(df("COL1") === "test_row_1" && df("ID") === 1L)
      .select(df("ID"))
      .show
{code}
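
As a possible workaround in the meantime, the RDD API from the same plugin page does not appear to go through the SQLContext row conversion that fails here. A minimal sketch, assuming the TABLE1 schema used in the plugin docs (ID BIGINT, COL1 VARCHAR) and the same zkUrl; note the filter then runs in Spark rather than being pushed down to Phoenix:

{code:java}
    import org.apache.spark.SparkContext
    import org.apache.phoenix.spark._

    val sc = new SparkContext("local", "phoenix-test")

    // phoenixTableAsRDD returns an RDD[Map[String, AnyRef]] keyed by
    // column name, bypassing the DataFrame scan path that throws below.
    val rdd = sc.phoenixTableAsRDD(
      "TABLE1", Seq("ID", "COL1"), zkUrl = Some("127.0.0.1:2181")
    )

    // Equivalent filter and projection, applied in plain Spark.
    rdd
      .filter(r => r("COL1") == "test_row_1" && r("ID") == 1L)
      .map(r => r("ID"))
      .collect()
      .foreach(println)
{code}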

Exception:
{noformat}
java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.GenericMutableRow cannot be cast to org.apache.spark.sql.Row
    at org.apache.spark.sql.SQLContext$$anonfun$7.apply(SQLContext.scala:439) ~[spark-sql_2.11-1.5.0.jar:1.5.0]
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:363) ~[scala-library-2.11.4.jar:na]
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:363) ~[scala-library-2.11.4.jar:na]
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:363) ~[scala-library-2.11.4.jar:na]
    at org.apache.spark.sql.execution.aggregate.TungstenAggregationIterator.processInputs(TungstenAggregationIterator.scala:366) ~[spark-sql_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.sql.execution.aggregate.TungstenAggregationIterator.start(TungstenAggregationIterator.scala:622) ~[spark-sql_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.org$apache$spark$sql$execution$aggregate$TungstenAggregate$$anonfun$$executePartition$1(TungstenAggregate.scala:110) ~[spark-sql_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1$$anonfun$2.apply(TungstenAggregate.scala:119) ~[spark-sql_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1$$anonfun$2.apply(TungstenAggregate.scala:119) ~[spark-sql_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.rdd.MapPartitionsWithPreparationRDD.compute(MapPartitionsWithPreparationRDD.scala:64) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:264) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:264) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.scheduler.Task.run(Task.scala:88) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214) ~[spark-core_2.11-1.5.0.jar:1.5.0]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [na:1.8.0_45]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [na:1.8.0_45]
    at java.lang.Thread.run(Thread.java:745) [na:1.8.0_45]
{noformat}
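
For context on where this blows up (my reading of the trace, not a confirmed root cause): the frame at SQLContext.scala:439 is where Spark 1.5 casts each row coming out of a data source scan to org.apache.spark.sql.Row. As far as I can tell, Spark 1.5 split the public Row type off from Catalyst's internal row classes, so GenericMutableRow, which was still a Row subtype in 1.4.x, no longer passes that cast. A minimal sketch of the kind of change that would sidestep this on the plugin side, assuming it builds its result rows itself (hypothetical helper, not the actual phoenix-spark code):

{code:java}
    import org.apache.spark.sql.Row

    // Hypothetical: build a public Row from the column values instead of
    // returning a Catalyst GenericMutableRow cast to Row. Row.fromSeq is
    // part of the stable Spark SQL API, so it is unaffected by the
    // 1.4 -> 1.5 split between Row and the internal row hierarchy.
    def toExternalRow(values: Seq[Any]): Row = Row.fromSeq(values)
{code}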


