Posted to issues@spark.apache.org by "Yana Kadiyska (JIRA)" <ji...@apache.org> on 2015/07/28 16:52:04 UTC

[jira] [Created] (SPARK-9405) approxCountDistinct does not work with GroupBy

Yana Kadiyska created SPARK-9405:
------------------------------------

             Summary: approxCountDistinct does not work with GroupBy
                 Key: SPARK-9405
                 URL: https://issues.apache.org/jira/browse/SPARK-9405
             Project: Spark
          Issue Type: Bug
          Components: SQL
    Affects Versions: 1.4.1
            Reporter: Yana Kadiyska


{code}
// spark-shell, Spark 1.4.1
import sqlContext.implicits._
import org.apache.spark.sql.functions._

case class MockCustomer(customer_id: Int, host: String)

val df = sc.parallelize(1 to 10)
  .map(i => MockCustomer(1234, if (i % 2 == 0) "http://foo.com" else "http://bar.com"))
  .toDF()

// this works OK
df.groupBy($"host").agg(count($"*"), sum($"customer_id")).show()
{code}

but this doesn't:

{code}
df.groupBy($"host").agg(approxCountDistinct($"*"), sum($"customer_id")).show()
15/07/28 10:46:14 INFO BlockManagerInfo: Removed broadcast_55_piece0 on localhost:33727 in memory (size: 4.4 KB, free: 265.3 MB)
15/07/28 10:46:14 INFO BlockManagerInfo: Removed broadcast_54_piece0 on localhost:33727 in memory (size: 4.4 KB, free: 265.3 MB)
org.apache.spark.sql.AnalysisException: cannot resolve 'host' given input columns customer_id, host;
        at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
        at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1$$anonfun$apply$2.applyOrElse(CheckAnalysis.scala:63)
        at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1$$anonfun$apply$2.applyOrElse(CheckAnalysis.scala:52)
        at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:286)
        at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:286)
        at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:51)
        at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:285)
        at org.apache.spark.sql.catalyst.plans.QueryPlan.org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1(QueryPlan.scala:108)
        at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$2$$anonfun$apply$2.apply(QueryPlan.scala:123)
        at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
        at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
        at scala.collection.AbstractTraversable.map(Traversable.scala:105)
        at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$2.apply(QueryPlan.scala:122)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at scala.collection.Iterator$class.foreach(Iterator.scala:727)
{code}
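
For comparison, the sketch below is a hypothetical workaround, not verified against 1.4.1: it passes a named column to approxCountDistinct instead of the star expression, which is what the analyzer appears to trip over.

{code}
// Hypothetical workaround (unverified on 1.4.1): give approxCountDistinct a
// concrete column instead of $"*". count($"*") above resolves fine, so the
// star expression only seems to break inside approxCountDistinct.
df.groupBy($"host")
  .agg(approxCountDistinct($"customer_id"), sum($"customer_id"))
  .show()
{code}

If that variant resolves, the bug would look specific to star expansion inside approxCountDistinct rather than to groupBy itself.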



