Posted to reviews@spark.apache.org by cloud-fan <gi...@git.apache.org> on 2017/07/06 08:13:40 UTC

[GitHub] spark pull request #18307: [SPARK-21100][SQL] Add summary method as alternat...

Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/18307#discussion_r125836508
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala ---
    @@ -2205,47 +2205,80 @@ class Dataset[T] private[sql](
        *   // max     92.0  192.0
        * }}}
        *
    +   * Use [[summary]] for expanded statistics and control over which statistics to compute.
    +   *
    +   * @param cols Columns to compute statistics on.
    +   *
        * @group action
        * @since 1.6.0
        */
       @scala.annotation.varargs
    -  def describe(cols: String*): DataFrame = withPlan {
    -
    -    // The list of summary statistics to compute, in the form of expressions.
    -    val statistics = List[(String, Expression => Expression)](
    -      "count" -> ((child: Expression) => Count(child).toAggregateExpression()),
    -      "mean" -> ((child: Expression) => Average(child).toAggregateExpression()),
    -      "stddev" -> ((child: Expression) => StddevSamp(child).toAggregateExpression()),
    -      "min" -> ((child: Expression) => Min(child).toAggregateExpression()),
    -      "max" -> ((child: Expression) => Max(child).toAggregateExpression()))
    -
    -    val outputCols =
    -      (if (cols.isEmpty) aggregatableColumns.map(usePrettyExpression(_).sql) else cols).toList
    -
    -    val ret: Seq[Row] = if (outputCols.nonEmpty) {
    -      val aggExprs = statistics.flatMap { case (_, colToAgg) =>
    -        outputCols.map(c => Column(Cast(colToAgg(Column(c).expr), StringType)).as(c))
    -      }
    -
    -      val row = groupBy().agg(aggExprs.head, aggExprs.tail: _*).head().toSeq
    -
    -      // Pivot the data so each summary is one row
    -      row.grouped(outputCols.size).toSeq.zip(statistics).map { case (aggregation, (statistic, _)) =>
    -        Row(statistic :: aggregation.toList: _*)
    -      }
    -    } else {
    -      // If there are no output columns, just output a single column that contains the stats.
    -      statistics.map { case (name, _) => Row(name) }
    -    }
    -
    -    // All columns are string type
    -    val schema = StructType(
    -      StructField("summary", StringType) :: outputCols.map(StructField(_, StringType))).toAttributes
    -    // `toArray` forces materialization to make the seq serializable
    -    LocalRelation.fromExternalRows(schema, ret.toArray.toSeq)
    +  def describe(cols: String*): DataFrame = {
    +    val selected = if (cols.isEmpty) this else select(cols.head, cols.tail: _*)
    +    selected.summary("count", "mean", "stddev", "min", "max")
       }
     
       /**
    +   * Computes specified statistics for numeric and string columns. Available statistics are:
    +   *
    +   * - count
    +   * - mean
    +   * - stddev
    +   * - min
    +   * - max
    +   * - arbitrary approximate percentiles specified as a percentage (e.g., 75%)
    +   *
    +   * If no statistics are given, this function computes count, mean, stddev, min,
    +   * approximate quartiles, and max.
    --- End diff --
    
    `approximate quartiles at 25%, 50% and 75%`?
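For context, a minimal usage sketch of the API under discussion (Scala). The SparkSession setup, column names, and sample data are illustrative assumptions, not taken from the PR; the behavior shown follows the diff above: describe delegates to summary with a fixed statistic list, and summary accepts percentage strings for arbitrary approximate percentiles.

    import org.apache.spark.sql.SparkSession

    object SummaryVsDescribe {
      def main(args: Array[String]): Unit = {
        // Local session purely for illustration.
        val spark = SparkSession.builder()
          .appName("summary-vs-describe")
          .master("local[*]")
          .getOrCreate()
        import spark.implicits._

        // Hypothetical toy data; any numeric column works.
        val df = Seq((1, 10.0), (2, 20.0), (3, 30.0)).toDF("id", "value")

        // After this PR, describe() is equivalent to
        // select(...).summary("count", "mean", "stddev", "min", "max").
        df.describe("value").show()

        // summary() with no arguments also includes the approximate
        // quartiles (25%, 50%, 75%) per the default statistic list.
        df.summary().show()

        // Arbitrary approximate percentiles as percentage strings.
        df.summary("count", "min", "25%", "75%", "max").show()

        spark.stop()
      }
    }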


