Posted to reviews@spark.apache.org by ueshin <gi...@git.apache.org> on 2018/01/10 09:58:16 UTC
[GitHub] spark pull request #19872: [SPARK-22274][PYTHON][SQL] User-defined aggregati...
Github user ueshin commented on a diff in the pull request:
https://github.com/apache/spark/pull/19872#discussion_r160617597
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/python/AggregateInPandasExec.scala ---
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.python
+
+import java.io.File
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.{SparkEnv, TaskContext}
+import org.apache.spark.api.python.{ChainedPythonFunctions, PythonEvalType}
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, Partitioning}
+import org.apache.spark.sql.execution.{GroupedIterator, SparkPlan, UnaryExecNode}
+import org.apache.spark.sql.types.{DataType, StructField, StructType}
+import org.apache.spark.util.Utils
+
+/**
+ * Physical node for aggregation with group aggregate Pandas UDF.
+ *
+ * This plan works by sending the necessary (projected) input grouped data as Arrow record batches
+ * to the Python worker; the Python worker invokes the UDF and sends the results back to the
+ * executor; finally, the executor evaluates any post-aggregation expressions and joins the result
+ * with the grouping key.
+ */
+case class AggregateInPandasExec(
+ groupingExpressions: Seq[NamedExpression],
+ udfExpressions: Seq[PythonUDF],
+ resultExpressions: Seq[NamedExpression],
+ child: SparkPlan)
+ extends UnaryExecNode {
+
+ override val output: Seq[Attribute] = resultExpressions.map(_.toAttribute)
+
+ override def outputPartitioning: Partitioning = child.outputPartitioning
+
+ override def producedAttributes: AttributeSet = AttributeSet(output)
+
+ override def requiredChildDistribution: Seq[Distribution] = {
+ if (groupingExpressions.isEmpty) {
+ AllTuples :: Nil
+ } else {
+ ClusteredDistribution(groupingExpressions) :: Nil
+ }
+ }
+
+ private def collectFunctions(udf: PythonUDF): (ChainedPythonFunctions, Seq[Expression]) = {
+ udf.children match {
+ case Seq(u: PythonUDF) =>
+ val (chained, children) = collectFunctions(u)
+ (ChainedPythonFunctions(chained.funcs ++ Seq(udf.func)), children)
+ case children =>
+ // There should not be any other UDFs, or the children can't be evaluated directly.
+ assert(children.forall(_.find(_.isInstanceOf[PythonUDF]).isEmpty))
+ (ChainedPythonFunctions(Seq(udf.func)), udf.children)
+ }
+ }
+
+ override def requiredChildOrdering: Seq[Seq[SortOrder]] =
+ Seq(groupingExpressions.map(SortOrder(_, Ascending)))
+
+ override protected def doExecute(): RDD[InternalRow] = {
+ val inputRDD = child.execute()
+
+ val bufferSize = inputRDD.conf.getInt("spark.buffer.size", 65536)
+ val reuseWorker = inputRDD.conf.getBoolean("spark.python.worker.reuse", defaultValue = true)
+ val sessionLocalTimeZone = conf.sessionLocalTimeZone
+ val pandasRespectSessionTimeZone = conf.pandasRespectSessionTimeZone
+
+ val (pyFuncs, inputs) = udfExpressions.map(collectFunctions).unzip
+
+ val allInputs = new ArrayBuffer[Expression]
+ val dataTypes = new ArrayBuffer[DataType]
+ val argOffsets = inputs.map { input =>
+ input.map { e =>
+ if (allInputs.exists(_.semanticEquals(e))) {
+ allInputs.indexWhere(_.semanticEquals(e))
+ } else {
+ allInputs += e
+ dataTypes += e.dataType
+ allInputs.length - 1
+ }
+ }.toArray
+ }.toArray
+
+ val schema = StructType(dataTypes.zipWithIndex.map { case (dt, i) =>
+ StructField(s"_$i", dt)
+ })
+
+ val input = groupingExpressions.map(_.toAttribute) ++ udfExpressions.map(_.resultAttribute)
--- End diff --
nit: maybe this name `input` is confusing.
---
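For readers following along, a minimal sketch (not part of the diff) of the user-facing grouped aggregate Pandas UDF that this physical node evaluates, assuming the `pandas_udf` / `PandasUDFType.GROUPED_AGG` API proposed in this PR:

    # Sketch of a grouped aggregate Pandas UDF, as proposed in SPARK-22274.
    from pyspark.sql import SparkSession
    from pyspark.sql.functions import pandas_udf, PandasUDFType

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))

    # The UDF receives each projected input column as a pandas.Series for one
    # group (shipped to the Python worker as Arrow record batches) and returns
    # a scalar per group.
    @pandas_udf("double", PandasUDFType.GROUPED_AGG)
    def mean_udf(v):
        return v.mean()

    # One output row per group: the executor joins the scalar result back with
    # the grouping key, as described in the Scaladoc of AggregateInPandasExec.
    df.groupby("id").agg(mean_udf(df["v"])).show()

The example mirrors the flow in the Scaladoc above: projected input columns go to the Python worker as Arrow record batches, the UDF returns one scalar per group, and the executor joins that result with the grouping key.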
---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org