Posted to dev@phoenix.apache.org by "海南中剑 (JIRA)" <ji...@apache.org> on 2019/07/15 10:47:00 UTC

[jira] [Created] (PHOENIX-5392) Phoenix-Spark timestamp filter pushdown generates invalid SQL

海南中剑 created PHOENIX-5392:
-----------------------------

             Summary: Phoenix-Spark timestamp filter pushdown generates invalid SQL
                 Key: PHOENIX-5392
                 URL: https://issues.apache.org/jira/browse/PHOENIX-5392
             Project: Phoenix
          Issue Type: Bug
    Affects Versions: 4.14.0
            Reporter: 海南中剑
             Fix For: 4.14.0


val df: DataFrame = hiveContext.read.format("org.apache.phoenix.spark")
  .option("table", "phoenix_cm_fdw.f_interest")
  .option("zkUrl", zkUrl)
  .option("dateAsTimestamp", "true") // surface Phoenix DATE columns as Spark TimestampType
  .load()
 * First case: filter with a timestamp cast:

val resultDf = df.filter("ui_create_time >= cast(1563156051 as timestamp)")

resultDf.show()
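
To see what Spark actually hands to the connector, the plan can be printed (explain is a standard Spark API; the PushedFilters line shown in the comment is illustrative, not captured output):

// The physical scan node lists the filters pushed to
// PhoenixRelation.buildScan, e.g. something like:
//   PushedFilters: [GreaterThanOrEqual(UI_CREATE_TIME,2019-07-11 10:00:51.0)]
resultDf.explain(true)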

Problem:

In org.apache.phoenix.spark.PhoenixRelation.buildScan, the filters array contains the 'ui_create_time' predicate, so the filter is pushed down to Phoenix.

Select Statement:

SELECT "0"."UI_CREATE_TIME" FROM phoenix_cm_fdw.f_interest WHERE ( "UI_CREATE_TIME" >= 2019-07-11 10:00:51.0)

Error:

Failed to get the query plan with error [ERROR 604 (42P00): Syntax error. Mismatched input. Expecting "RPAREN", got "10" at line 1, column 99.]

Caused by: java.lang.RuntimeException: org.apache.phoenix.exception.PhoenixParserException: ERROR 604 (42P00): Syntax error. Mismatched input. Expecting "RPAREN", got "10" at line 1, column 99.
 at org.apache.phoenix.mapreduce.PhoenixInputFormat.getQueryPlan(PhoenixInputFormat.java:208)
 at org.apache.phoenix.mapreduce.PhoenixInputFormat.getSplits(PhoenixInputFormat.java:87)
 at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:124)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.phoenix.spark.PhoenixRDD.getPartitions(PhoenixRDD.scala:55)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
 at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
 at scala.Option.getOrElse(Option.scala:120)
 at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
 at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:91)
 at org.apache.spark.sql.execution.Exchange.prepareShuffleDependency(Exchange.scala:220)
 at org.apache.spark.sql.execution.Exchange$$anonfun$doExecute$1.apply(Exchange.scala:254)
 at org.apache.spark.sql.execution.Exchange$$anonfun$doExecute$1.apply(Exchange.scala:248)
 at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:48)
 ... 27 more
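
Until the pushdown is fixed, one direction (a hypothetical patch sketch, not the committed fix) is to render temporal values as Phoenix TO_TIMESTAMP/TO_DATE calls so the generated WHERE clause parses:

// Hypothetical compileValue patch: wrap java.sql.Timestamp/Date values in
// Phoenix's TO_TIMESTAMP/TO_DATE functions instead of bare toString.
def compileValue(value: Any): String = value match {
  case s: String             => s"'${s.replace("'", "''")}'"
  case t: java.sql.Timestamp => s"TO_TIMESTAMP('$t')" // TO_TIMESTAMP('2019-07-11 10:00:51.0')
  case d: java.sql.Date      => s"TO_DATE('$d')"      // TO_DATE('2019-07-11')
  case other                 => other.toString
}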
 * Second case: filter with a string literal:

val resultDf = df.filter(df("ui_interest_date") >= "2019-07-14 20:00:00.0" )

In org.apache.phoenix.spark.PhoenixRelation.buildScan, the filters array does not contain the 'ui_create_time' predicate, so nothing is pushed down and Phoenix receives an unfiltered scan.

Select Statement:

SELECT "0"."UI_CREATE_TIME" FROM phoenix_cm_fdw.f_interest

 

Why is the first filter pushed down as invalid SQL, while the second filter is not pushed down at all?
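
A likely explanation for the second case (an assumption about Spark's planner, not confirmed in this report): comparing a TimestampType column to a String makes Catalyst wrap the column in a Cast, and cast expressions are not translated into data source Filters, so buildScan receives nothing and the predicate is evaluated in Spark. Passing a java.sql.Timestamp literal should yield a pushable filter (column names as in this report):

import java.sql.Timestamp

// Sketch: comparing against a Timestamp literal lets Catalyst emit a plain
// GreaterThanOrEqual(column, timestamp) source filter instead of a Cast,
// which is never translated into a pushed filter.
val ts = Timestamp.valueOf("2019-07-14 20:00:00")
val resultDf = df.filter(df("ui_create_time") >= ts)
// Once pushed down, this may then hit the same unquoted-literal bug as the
// first case.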



--
This message was sent by Atlassian JIRA
(v7.6.14#76016)