Posted to github@arrow.apache.org by GitBox <gi...@apache.org> on 2020/10/04 19:01:11 UTC

[GitHub] [arrow] jorgecarleitao commented on a change in pull request #8222: ARROW-10043: [Rust][DataFusion] Implement COUNT(DISTINCT col)

jorgecarleitao commented on a change in pull request #8222:
URL: https://github.com/apache/arrow/pull/8222#discussion_r499271330



##########
File path: rust/datafusion/src/physical_plan/aggregates.rs
##########
@@ -124,14 +126,30 @@ pub fn create_aggregate_expr(
 
     let return_type = return_type(&fun, &arg_types)?;
 
-    Ok(match fun {
-        AggregateFunction::Count => {
+    Ok(match (fun, distinct) {
+        (AggregateFunction::Count, false) => {
             Arc::new(expressions::Count::new(arg, name, return_type))
         }
-        AggregateFunction::Sum => Arc::new(expressions::Sum::new(arg, name, return_type)),
-        AggregateFunction::Min => Arc::new(expressions::Min::new(arg, name, return_type)),
-        AggregateFunction::Max => Arc::new(expressions::Max::new(arg, name, return_type)),
-        AggregateFunction::Avg => Arc::new(expressions::Avg::new(arg, name, return_type)),
+        (AggregateFunction::Count, true) => {
+            Arc::new(distinct_expressions::DistinctCount::new(
+                arg_types,
+                args.clone(),
+                name,
+                return_type,
+            ))
+        }
+        (AggregateFunction::Sum, _) => {

Review comment:
       What do you think of replacing `_` with `false` and returning an error if the expression is not implemented, just to be on the safe side?
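
    For illustration, a sketch of that shape (the `ExecutionError::NotImplemented` variant and the exact message wording are assumptions, not part of this PR):

```rust
Ok(match (fun, distinct) {
    (AggregateFunction::Count, false) => {
        Arc::new(expressions::Count::new(arg, name, return_type))
    }
    (AggregateFunction::Count, true) => {
        Arc::new(distinct_expressions::DistinctCount::new(
            arg_types,
            args.clone(),
            name,
            return_type,
        ))
    }
    (AggregateFunction::Sum, false) => {
        Arc::new(expressions::Sum::new(arg, name, return_type))
    }
    // ... Min, Max and Avg handled the same way, each with an explicit `false` ...
    (_, true) => {
        // Assumed error variant; any descriptive error would do here.
        return Err(ExecutionError::NotImplemented(
            "DISTINCT is only implemented for COUNT".to_string(),
        ))
    }
})
```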

##########
File path: rust/datafusion/src/physical_plan/distinct_expressions.rs
##########
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Implementations for DISTINCT expressions, e.g. `COUNT(DISTINCT c)`
+
+use std::cell::RefCell;
+use std::convert::TryFrom;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::rc::Rc;
+use std::sync::Arc;
+
+use arrow::array::ArrayRef;
+use arrow::array::ListArray;
+use arrow::datatypes::{DataType, Field};
+
+use fnv::FnvHashSet;
+
+use crate::error::{ExecutionError, Result};
+use crate::physical_plan::group_scalar::GroupByScalar;
+use crate::physical_plan::{Accumulator, AggregateExpr, PhysicalExpr};
+use crate::scalar::ScalarValue;
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone)]
+struct DistinctScalarValues(Vec<GroupByScalar>);
+
+fn format_state_name(name: &str, state_name: &str) -> String {
+    format!("{}[{}]", name, state_name)
+}
+
+/// Expression for a COUNT(DISTINCT) aggregation.
+#[derive(Debug)]
+pub struct DistinctCount {
+    /// Column name
+    name: String,
+    /// The DataType for the final count
+    data_type: DataType,
+    /// The DataType for each input argument
+    input_data_types: Vec<DataType>,
+    /// The input arguments
+    exprs: Vec<Arc<dyn PhysicalExpr>>,
+}
+
+impl DistinctCount {
+    /// Create a new COUNT(DISTINCT) aggregate function.
+    pub fn new(
+        input_data_types: Vec<DataType>,
+        exprs: Vec<Arc<dyn PhysicalExpr>>,
+        name: String,
+        data_type: DataType,
+    ) -> Self {
+        Self {
+            input_data_types,
+            exprs,
+            name,
+            data_type,
+        }
+    }
+}
+
+impl AggregateExpr for DistinctCount {
+    fn field(&self) -> Result<Field> {
+        Ok(Field::new(&self.name, self.data_type.clone(), false))
+    }
+
+    fn state_fields(&self) -> Result<Vec<Field>> {
+        Ok(self
+            .input_data_types
+            .iter()
+            .map(|data_type| {
+                Field::new(
+                    &format_state_name(&self.name, "count distinct"),
+                    DataType::List(Box::new(data_type.clone())),
+                    false,
+                )
+            })
+            .collect::<Vec<_>>())
+    }
+
+    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
+        self.exprs.clone()
+    }
+
+    fn create_accumulator(&self) -> Result<Rc<RefCell<dyn Accumulator>>> {
+        Ok(Rc::new(RefCell::new(DistinctCountAccumulator {
+            values: FnvHashSet::default(),
+            data_types: self.input_data_types.clone(),
+            count_data_type: self.data_type.clone(),
+        })))
+    }
+}
+
+#[derive(Debug)]
+struct DistinctCountAccumulator {
+    values: FnvHashSet<DistinctScalarValues>,
+    data_types: Vec<DataType>,
+    count_data_type: DataType,
+}
+
+impl Accumulator for DistinctCountAccumulator {
+    fn update_batch(&mut self, arrays: &Vec<ArrayRef>) -> Result<()> {
+        for row in 0..arrays[0].len() {
+            let row_values = arrays
+                .iter()
+                .map(|array| ScalarValue::try_from_array(array, row))
+                .collect::<Result<Vec<_>>>()?;
+
+            self.update(&row_values)?;
+        }
+
+        Ok(())
+    }
+
+    fn update(&mut self, values: &Vec<ScalarValue>) -> Result<()> {
+        // If a row has a NULL, it is not included in the final count.
+        if !values.iter().any(|v| v.is_null()) {
+            self.values.insert(DistinctScalarValues(
+                values
+                    .iter()
+                    .map(GroupByScalar::try_from)
+                    .collect::<Result<Vec<_>>>()?,
+            ));
+        }
+
+        Ok(())
+    }
+
+    fn merge(&mut self, states: &Vec<ScalarValue>) -> Result<()> {
+        self.update(states)
+    }
+
+    fn merge_batch(&mut self, states: &Vec<ArrayRef>) -> Result<()> {
+        let list_arrays = states
+            .iter()
+            .map(|state_array| {
+                state_array.as_any().downcast_ref::<ListArray>().ok_or(
+                    ExecutionError::InternalError(
+                        "Failed to downcast ListArray".to_string(),
+                    ),
+                )
+            })
+            .collect::<Result<Vec<_>>>()?;
+
+        let values_arrays = list_arrays
+            .iter()
+            .map(|list_array| list_array.values())
+            .collect::<Vec<_>>();
+
+        self.update_batch(&values_arrays)
+    }
+
+    fn state(&self) -> Result<Vec<ScalarValue>> {
+        let mut cols_out = self
+            .data_types
+            .iter()
+            .map(|data_type| ScalarValue::List(Some(Vec::new()), data_type.clone()))
+            .collect::<Vec<_>>();
+
+        let mut cols_vec = cols_out
+            .iter_mut()
+            .map(|c| match c {
+                ScalarValue::List(Some(ref mut v), _) => v,
+                _ => unreachable!(),
+            })
+            .collect::<Vec<_>>();
+
+        self.values.iter().for_each(|distinct_values| {
+            distinct_values.0.iter().enumerate().for_each(
+                |(col_index, distinct_value)| {
+                    cols_vec[col_index].push(ScalarValue::from(distinct_value));
+                },
+            )
+        });
+
+        Ok(cols_out)
+    }
+
+    fn evaluate(&self) -> Result<ScalarValue> {
+        match &self.count_data_type {

Review comment:
       Given that this only returns `UInt64`, what do you think about removing `count_data_type` from the constructor's arguments?
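
    For illustration, with the output type fixed, the expression and accumulator could hard-code `UInt64`; a sketch (not the current implementation):

```rust
// On DistinctCount
fn field(&self) -> Result<Field> {
    Ok(Field::new(&self.name, DataType::UInt64, false))
}

// On DistinctCountAccumulator
fn evaluate(&self) -> Result<ScalarValue> {
    Ok(ScalarValue::UInt64(Some(self.values.len() as u64)))
}
```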

##########
File path: rust/datafusion/src/physical_plan/group_scalar.rs
##########
@@ -0,0 +1,77 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Defines scalars used to construct groups, ex. in GROUP BY clauses.
+
+use std::convert::{From, TryFrom};
+
+use crate::error::{ExecutionError, Result};
+use crate::scalar::ScalarValue;
+
+/// Enumeration of types that can be used in a GROUP BY expression (all primitives except
+/// for floating point numerics)
+#[derive(Debug, PartialEq, Eq, Hash, Clone)]
+pub(crate) enum GroupByScalar {
+    UInt8(u8),
+    UInt16(u16),
+    UInt32(u32),
+    UInt64(u64),
+    Int8(i8),
+    Int16(i16),
+    Int32(i32),
+    Int64(i64),
+    Utf8(String),
+}
+
+impl TryFrom<&ScalarValue> for GroupByScalar {
+    type Error = ExecutionError;
+
+    fn try_from(scalar_value: &ScalarValue) -> Result<Self> {
+        Ok(match scalar_value {
+            ScalarValue::Int8(Some(v)) => GroupByScalar::Int8(*v),
+            ScalarValue::Int16(Some(v)) => GroupByScalar::Int16(*v),
+            ScalarValue::Int32(Some(v)) => GroupByScalar::Int32(*v),
+            ScalarValue::Int64(Some(v)) => GroupByScalar::Int64(*v),
+            ScalarValue::UInt8(Some(v)) => GroupByScalar::UInt8(*v),
+            ScalarValue::UInt16(Some(v)) => GroupByScalar::UInt16(*v),
+            ScalarValue::UInt32(Some(v)) => GroupByScalar::UInt32(*v),
+            ScalarValue::UInt64(Some(v)) => GroupByScalar::UInt64(*v),
+            ScalarValue::Utf8(Some(v)) => GroupByScalar::Utf8(v.clone()),
+            _ => {
+                return Err(ExecutionError::InternalError(
+                    "Cannot convert ScalarValue to GroupByScalar".to_string(),

Review comment:
       What do you think about adding the `DataType` that we were unable to convert, so that we have a more informative error message?
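
    For example, the fallback arm could bind the value and embed it in the message, which carries the variant (and hence the type); a sketch, assuming `ScalarValue` implements `Debug`:

```rust
v => {
    return Err(ExecutionError::InternalError(format!(
        "Cannot convert {:?} to GroupByScalar",
        v
    )))
}
```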

##########
File path: rust/datafusion/src/test/mod.rs
##########
@@ -135,6 +135,13 @@ pub fn format_batch(batch: &RecordBatch) -> Vec<String> {
             }
             let array = batch.column(column_index);
             match array.data_type() {
+                DataType::Utf8 => s.push_str(
+                    array
+                        .as_any()
+                        .downcast_ref::<array::StringArray>()
+                        .unwrap()
+                        .value(row_index),
+                ),

Review comment:
       I think a test for this is also missing in this module.
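
    A minimal test might look like this (a sketch; it assumes the module's existing imports such as `Arc`, `ArrayRef`, `array::StringArray`, `RecordBatch`, `Schema` and `Field`, and it only checks that the values show up rather than the exact row formatting):

```rust
#[test]
fn format_batch_includes_utf8_values() {
    let schema = Arc::new(Schema::new(vec![Field::new("c1", DataType::Utf8, false)]));
    let array: ArrayRef = Arc::new(array::StringArray::from(vec!["a", "b"]));
    let batch = RecordBatch::try_new(schema, vec![array]).unwrap();

    let rows = format_batch(&batch);

    assert_eq!(rows.len(), 2);
    assert!(rows[0].contains("a"));
    assert!(rows[1].contains("b"));
}
```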

##########
File path: rust/datafusion/src/physical_plan/distinct_expressions.rs
##########
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Implementations for DISTINCT expressions, e.g. `COUNT(DISTINCT c)`
+
+use std::cell::RefCell;
+use std::convert::TryFrom;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::rc::Rc;
+use std::sync::Arc;
+
+use arrow::array::ArrayRef;
+use arrow::array::ListArray;
+use arrow::datatypes::{DataType, Field};
+
+use fnv::FnvHashSet;
+
+use crate::error::{ExecutionError, Result};
+use crate::physical_plan::group_scalar::GroupByScalar;
+use crate::physical_plan::{Accumulator, AggregateExpr, PhysicalExpr};
+use crate::scalar::ScalarValue;
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone)]
+struct DistinctScalarValues(Vec<GroupByScalar>);
+
+fn format_state_name(name: &str, state_name: &str) -> String {
+    format!("{}[{}]", name, state_name)
+}
+
+/// Expression for a COUNT(DISTINCT) aggregation.
+#[derive(Debug)]
+pub struct DistinctCount {
+    /// Column name
+    name: String,
+    /// The DataType for the final count
+    data_type: DataType,
+    /// The DataType for each input argument
+    input_data_types: Vec<DataType>,
+    /// The input arguments
+    exprs: Vec<Arc<dyn PhysicalExpr>>,
+}
+
+impl DistinctCount {
+    /// Create a new COUNT(DISTINCT) aggregate function.
+    pub fn new(
+        input_data_types: Vec<DataType>,
+        exprs: Vec<Arc<dyn PhysicalExpr>>,
+        name: String,
+        data_type: DataType,
+    ) -> Self {
+        Self {
+            input_data_types,
+            exprs,
+            name,
+            data_type,
+        }
+    }
+}
+
+impl AggregateExpr for DistinctCount {
+    fn field(&self) -> Result<Field> {
+        Ok(Field::new(&self.name, self.data_type.clone(), false))
+    }
+
+    fn state_fields(&self) -> Result<Vec<Field>> {
+        Ok(self
+            .input_data_types
+            .iter()
+            .map(|data_type| {
+                Field::new(
+                    &format_state_name(&self.name, "count distinct"),
+                    DataType::List(Box::new(data_type.clone())),
+                    false,
+                )
+            })
+            .collect::<Vec<_>>())
+    }
+
+    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
+        self.exprs.clone()
+    }
+
+    fn create_accumulator(&self) -> Result<Rc<RefCell<dyn Accumulator>>> {
+        Ok(Rc::new(RefCell::new(DistinctCountAccumulator {
+            values: FnvHashSet::default(),
+            data_types: self.input_data_types.clone(),
+            count_data_type: self.data_type.clone(),
+        })))
+    }
+}
+
+#[derive(Debug)]
+struct DistinctCountAccumulator {
+    values: FnvHashSet<DistinctScalarValues>,
+    data_types: Vec<DataType>,
+    count_data_type: DataType,
+}
+
+impl Accumulator for DistinctCountAccumulator {
+    fn update_batch(&mut self, arrays: &Vec<ArrayRef>) -> Result<()> {

Review comment:
       This implementation seems similar to the default `update_batch` (implemented by the trait `Accumulator`). Do you think there's a way to generalize the default to take this `Accumulator` into account, so that we can DRY up some code? (Same for `merge_batch`.)
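
    One possible shape for that generalization, as a sketch only: make `update_batch` a provided method on the `Accumulator` trait that drives the row-wise `update`, so concrete accumulators only override it when they can do better.

```rust
fn update_batch(&mut self, arrays: &Vec<ArrayRef>) -> Result<()> {
    // Default implementation: feed each row to `update`.
    if arrays.is_empty() {
        return Ok(());
    }
    for row in 0..arrays[0].len() {
        let values = arrays
            .iter()
            .map(|array| ScalarValue::try_from_array(array, row))
            .collect::<Result<Vec<_>>>()?;
        self.update(&values)?;
    }
    Ok(())
}
```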

##########
File path: rust/datafusion/src/physical_plan/distinct_expressions.rs
##########
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Implementations for DISTINCT expressions, e.g. `COUNT(DISTINCT c)`
+
+use std::cell::RefCell;
+use std::convert::TryFrom;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::rc::Rc;
+use std::sync::Arc;
+
+use arrow::array::ArrayRef;
+use arrow::array::ListArray;
+use arrow::datatypes::{DataType, Field};
+
+use fnv::FnvHashSet;
+
+use crate::error::{ExecutionError, Result};
+use crate::physical_plan::group_scalar::GroupByScalar;
+use crate::physical_plan::{Accumulator, AggregateExpr, PhysicalExpr};
+use crate::scalar::ScalarValue;
+
+#[derive(Debug, PartialEq, Eq, Hash, Clone)]
+struct DistinctScalarValues(Vec<GroupByScalar>);
+
+fn format_state_name(name: &str, state_name: &str) -> String {
+    format!("{}[{}]", name, state_name)
+}
+
+/// Expression for a COUNT(DISTINCT) aggregation.
+#[derive(Debug)]
+pub struct DistinctCount {
+    /// Column name
+    name: String,
+    /// The DataType for the final count
+    data_type: DataType,
+    /// The DataType for each input argument
+    input_data_types: Vec<DataType>,
+    /// The input arguments
+    exprs: Vec<Arc<dyn PhysicalExpr>>,
+}
+
+impl DistinctCount {
+    /// Create a new COUNT(DISTINCT) aggregate function.
+    pub fn new(
+        input_data_types: Vec<DataType>,
+        exprs: Vec<Arc<dyn PhysicalExpr>>,
+        name: String,
+        data_type: DataType,
+    ) -> Self {
+        Self {
+            input_data_types,
+            exprs,
+            name,
+            data_type,
+        }
+    }
+}
+
+impl AggregateExpr for DistinctCount {
+    fn field(&self) -> Result<Field> {
+        Ok(Field::new(&self.name, self.data_type.clone(), false))
+    }
+
+    fn state_fields(&self) -> Result<Vec<Field>> {
+        Ok(self
+            .input_data_types
+            .iter()
+            .map(|data_type| {
+                Field::new(
+                    &format_state_name(&self.name, "count distinct"),
+                    DataType::List(Box::new(data_type.clone())),
+                    false,
+                )
+            })
+            .collect::<Vec<_>>())
+    }
+
+    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
+        self.exprs.clone()
+    }
+
+    fn create_accumulator(&self) -> Result<Rc<RefCell<dyn Accumulator>>> {
+        Ok(Rc::new(RefCell::new(DistinctCountAccumulator {
+            values: FnvHashSet::default(),
+            data_types: self.input_data_types.clone(),
+            count_data_type: self.data_type.clone(),
+        })))
+    }
+}
+
+#[derive(Debug)]
+struct DistinctCountAccumulator {
+    values: FnvHashSet<DistinctScalarValues>,
+    data_types: Vec<DataType>,
+    count_data_type: DataType,
+}
+
+impl Accumulator for DistinctCountAccumulator {
+    fn update_batch(&mut self, arrays: &Vec<ArrayRef>) -> Result<()> {
+        for row in 0..arrays[0].len() {
+            let row_values = arrays
+                .iter()
+                .map(|array| ScalarValue::try_from_array(array, row))
+                .collect::<Result<Vec<_>>>()?;
+
+            self.update(&row_values)?;
+        }
+
+        Ok(())
+    }
+
+    fn update(&mut self, values: &Vec<ScalarValue>) -> Result<()> {
+        // If a row has a NULL, it is not included in the final count.
+        if !values.iter().any(|v| v.is_null()) {
+            self.values.insert(DistinctScalarValues(
+                values
+                    .iter()
+                    .map(GroupByScalar::try_from)
+                    .collect::<Result<Vec<_>>>()?,
+            ));
+        }
+
+        Ok(())
+    }
+
+    fn merge(&mut self, states: &Vec<ScalarValue>) -> Result<()> {
+        self.update(states)
+    }
+
+    fn merge_batch(&mut self, states: &Vec<ArrayRef>) -> Result<()> {
+        let list_arrays = states
+            .iter()
+            .map(|state_array| {
+                state_array.as_any().downcast_ref::<ListArray>().ok_or(
+                    ExecutionError::InternalError(
+                        "Failed to downcast ListArray".to_string(),
+                    ),
+                )
+            })
+            .collect::<Result<Vec<_>>>()?;
+
+        let values_arrays = list_arrays
+            .iter()
+            .map(|list_array| list_array.values())
+            .collect::<Vec<_>>();
+
+        self.update_batch(&values_arrays)
+    }
+
+    fn state(&self) -> Result<Vec<ScalarValue>> {
+        let mut cols_out = self
+            .data_types
+            .iter()
+            .map(|data_type| ScalarValue::List(Some(Vec::new()), data_type.clone()))
+            .collect::<Vec<_>>();
+
+        let mut cols_vec = cols_out
+            .iter_mut()
+            .map(|c| match c {
+                ScalarValue::List(Some(ref mut v), _) => v,
+                _ => unreachable!(),
+            })
+            .collect::<Vec<_>>();
+
+        self.values.iter().for_each(|distinct_values| {
+            distinct_values.0.iter().enumerate().for_each(
+                |(col_index, distinct_value)| {
+                    cols_vec[col_index].push(ScalarValue::from(distinct_value));
+                },
+            )
+        });
+
+        Ok(cols_out)
+    }
+
+    fn evaluate(&self) -> Result<ScalarValue> {
+        match &self.count_data_type {
+            DataType::UInt64 => Ok(ScalarValue::UInt64(Some(self.values.len() as u64))),
+            t => {
+                return Err(ExecutionError::InternalError(format!(
+                    "Invalid data type {:?} for count distinct aggregation",
+                    t
+                )))
+            }
+        }
+    }
+}

Review comment:
       I think some tests are missing here. Something like:
   
   * one without nulls
   * one with some nulls
   * one with only nulls
   * one with an empty batch
   
   (you can take a look at `expressions.rs` for inspiration ^_^)
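
    A sketch of what such tests could look like (the `count_distinct` helper is hypothetical and exists only for these tests; it constructs the accumulator directly with the fields shown above):

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use arrow::array::Int32Array;

    // Hypothetical helper: run the accumulator over a single column and
    // return the final distinct count as a plain u64.
    fn count_distinct(array: ArrayRef) -> Result<u64> {
        let mut accum = DistinctCountAccumulator {
            values: FnvHashSet::default(),
            data_types: vec![array.data_type().clone()],
            count_data_type: DataType::UInt64,
        };
        accum.update_batch(&vec![array])?;
        match accum.evaluate()? {
            ScalarValue::UInt64(Some(n)) => Ok(n),
            other => Err(ExecutionError::InternalError(format!(
                "unexpected evaluate result: {:?}",
                other
            ))),
        }
    }

    #[test]
    fn count_distinct_without_nulls() -> Result<()> {
        let a: ArrayRef = Arc::new(Int32Array::from(vec![1, 1, 2, 3]));
        assert_eq!(count_distinct(a)?, 3);
        Ok(())
    }

    #[test]
    fn count_distinct_with_some_nulls() -> Result<()> {
        // Rows containing a NULL are not counted.
        let a: ArrayRef = Arc::new(Int32Array::from(vec![Some(1), None, Some(1), Some(2)]));
        assert_eq!(count_distinct(a)?, 2);
        Ok(())
    }

    #[test]
    fn count_distinct_only_nulls_and_empty_batch() -> Result<()> {
        let nulls: ArrayRef = Arc::new(Int32Array::from(vec![None::<i32>, None]));
        assert_eq!(count_distinct(nulls)?, 0);

        let empty: ArrayRef = Arc::new(Int32Array::from(Vec::<i32>::new()));
        assert_eq!(count_distinct(empty)?, 0);
        Ok(())
    }
}
```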
   




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org