You are viewing a plain text version of this content. The canonical link for it is here.
Posted to github@arrow.apache.org by GitBox <gi...@apache.org> on 2022/11/09 06:59:37 UTC

[GitHub] [arrow-datafusion] yahoNanJing commented on a diff in pull request #4122: [Part3] Partition and Sort Enforcement, Enforcement rule implementation

yahoNanJing commented on code in PR #4122:
URL: https://github.com/apache/arrow-datafusion/pull/4122#discussion_r1017500647


##########
datafusion/core/src/physical_optimizer/enforcement.rs:
##########
@@ -0,0 +1,1739 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Enforcement optimizer rules are used to make sure the plan's Distribution and Ordering
+//! requirements are met by inserting necessary [[RepartitionExec]] and [[SortExec]].
+//!
+use crate::error::Result;
+use crate::physical_optimizer::PhysicalOptimizerRule;
+use crate::physical_plan::aggregates::{AggregateExec, AggregateMode, PhysicalGroupBy};
+use crate::physical_plan::coalesce_partitions::CoalescePartitionsExec;
+use crate::physical_plan::joins::{
+    CrossJoinExec, HashJoinExec, PartitionMode, SortMergeJoinExec,
+};
+use crate::physical_plan::projection::ProjectionExec;
+use crate::physical_plan::repartition::RepartitionExec;
+use crate::physical_plan::rewrite::TreeNodeRewritable;
+use crate::physical_plan::sorts::sort::SortExec;
+use crate::physical_plan::Partitioning;
+use crate::physical_plan::{with_new_children_if_necessary, Distribution, ExecutionPlan};
+use crate::prelude::SessionConfig;
+use datafusion_expr::logical_plan::JoinType;
+use datafusion_physical_expr::equivalence::EquivalenceProperties;
+use datafusion_physical_expr::expressions::Column;
+use datafusion_physical_expr::expressions::NoOp;
+use datafusion_physical_expr::{
+    expr_list_eq_strict_order, normalize_expr_with_equivalence_properties,
+    normalize_sort_expr_with_equivalence_properties, PhysicalExpr, PhysicalSortExpr,
+};
+use std::collections::HashMap;
+use std::sync::Arc;
+
+/// BasicEnforcement rule, it ensures the Distribution and Ordering requirements are met
+/// in the strictest way. It might add additional [[RepartitionExec]] to the plan tree
+/// and give a non-optimal plan, but it can avoid the possible data skew in joins
+///
+/// For example for a HashJoin with keys(a, b, c), the required Distribution(a, b, c) can be satisfied by
+/// several alternative partitioning ways: [(a, b, c), (a, b), (a, c), (b, c), (a), (b), (c), ( )].
+///
+/// This rule only chooses the exact match and satisfies the Distribution(a, b, c) by a HashPartition(a, b, c).
+#[derive(Default)]
+pub struct BasicEnforcement {}
+
+impl BasicEnforcement {
+    /// Create a new `BasicEnforcement` optimizer rule.
+    #[allow(missing_docs)]
+    pub fn new() -> Self {
+        Self {}
+    }
+}
+
+impl PhysicalOptimizerRule for BasicEnforcement {
+    // Entry point of the rule: optionally reorder join keys (top-down), then
+    // enforce Distribution and Ordering requirements bottom-up.
+    fn optimize(
+        &self,
+        plan: Arc<dyn ExecutionPlan>,
+        config: &SessionConfig,
+    ) -> Result<Arc<dyn ExecutionPlan>> {
+        let target_partitions = config.target_partitions;
+        // Two mutually-exclusive join-key reordering strategies are supported,
+        // selected by config: a top-down recursive pass over the whole tree, or
+        // a per-node bottom-up adjustment done inside the transform_up closure.
+        let top_down_join_key_reordering = config.top_down_join_key_reordering;
+        let new_plan = if top_down_join_key_reordering {
+            // Run a top-down process to adjust input key ordering recursively
+            adjust_input_keys_down_recursively(plan, vec![])?
+        } else {
+            plan
+        };
+        // Distribution and Ordering enforcement need to be applied bottom-up.
+        new_plan.transform_up(&{
+            |plan| {
+                // Only reorder here when the top-down pass was skipped above.
+                let adjusted = if !top_down_join_key_reordering {
+                    reorder_join_keys_to_inputs(plan)
+                } else {
+                    plan
+                };
+                Some(ensure_distribution_and_ordering(
+                    adjusted,
+                    target_partitions,
+                ))
+            }
+        })
+    }
+
+    fn name(&self) -> &str {
+        "BasicEnforcement"
+    }
+}
+
+/// When the physical planner creates the Joins, the ordering of join keys is from the original query.
+/// That might not match with the output partitioning of the join node's children
+/// This method runs a top-down process and tries to adjust the output partitioning of the children
+/// if the children themselves are Joins or Aggregations.
+///
+/// `parent_required` is the key ordering required by the parent operator,
+/// expressed against this operator's output schema; an empty vector means
+/// the parent imposes no key-ordering requirement.
+fn adjust_input_keys_down_recursively(
+    plan: Arc<dyn crate::physical_plan::ExecutionPlan>,
+    parent_required: Vec<Arc<dyn PhysicalExpr>>,
+) -> Result<Arc<dyn crate::physical_plan::ExecutionPlan>> {
+    // Dispatch on the concrete operator type; each branch decides what key
+    // ordering (if any) to require from its children.
+    let plan_any = plan.as_any();
+    if let Some(HashJoinExec {
+        left,
+        right,
+        on,
+        filter,
+        join_type,
+        mode,
+        null_equals_null,
+        ..
+    }) = plan_any.downcast_ref::<HashJoinExec>()
+    {
+        match mode {
+            PartitionMode::Partitioned => {
+                let join_key_pairs = extract_join_keys(on);
+                // Try to reorder this join's keys to match the parent's
+                // requirement; on success the reordered keys are pushed down
+                // to both children so their output partitioning can match.
+                if let Some((
+                    JoinKeyPairs {
+                        left_keys,
+                        right_keys,
+                    },
+                    new_positions,
+                )) = try_reorder(
+                    join_key_pairs.clone(),
+                    parent_required,
+                    &plan.equivalence_properties(),
+                ) {
+                    // An empty `new_positions` means the keys already matched;
+                    // only rebuild the join conditions when a real reorder happened.
+                    let new_join_on = if !new_positions.is_empty() {
+                        new_join_conditions(&left_keys, &right_keys)
+                    } else {
+                        on.clone()
+                    };
+                    let new_left =
+                        adjust_input_keys_down_recursively(left.clone(), left_keys)?;
+                    let new_right =
+                        adjust_input_keys_down_recursively(right.clone(), right_keys)?;
+                    Ok(Arc::new(HashJoinExec::try_new(
+                        new_left,
+                        new_right,
+                        new_join_on,
+                        filter.clone(),
+                        join_type,
+                        PartitionMode::Partitioned,
+                        null_equals_null,
+                    )?))
+                } else {
+                    // Reordering is impossible: keep the current key order and
+                    // push the existing keys down as the children's requirement.
+                    let new_left = adjust_input_keys_down_recursively(
+                        left.clone(),
+                        join_key_pairs.left_keys,
+                    )?;
+                    let new_right = adjust_input_keys_down_recursively(
+                        right.clone(),
+                        join_key_pairs.right_keys,
+                    )?;
+                    Ok(Arc::new(HashJoinExec::try_new(
+                        new_left,
+                        new_right,
+                        on.clone(),
+                        filter.clone(),
+                        join_type,
+                        PartitionMode::Partitioned,
+                        null_equals_null,
+                    )?))
+                }
+            }
+            PartitionMode::CollectLeft => {
+                // The left side is collected (broadcast), so it gets no key
+                // requirement; whether the requirement can flow to the right
+                // side depends on the join type's output column layout.
+                let new_left = adjust_input_keys_down_recursively(left.clone(), vec![])?;
+                let new_right = match join_type {
+                    JoinType::Inner | JoinType::Right => try_push_required_to_right(
+                        parent_required,
+                        right.clone(),
+                        left.schema().fields().len(),
+                    )?,
+                    JoinType::RightSemi | JoinType::RightAnti => {
+                        // Output columns come solely from the right side, so the
+                        // parent requirement applies to the right child unchanged.
+                        adjust_input_keys_down_recursively(
+                            right.clone(),
+                            parent_required.clone(),
+                        )?
+                    }
+                    JoinType::Left
+                    | JoinType::LeftSemi
+                    | JoinType::LeftAnti
+                    | JoinType::Full => {
+                        adjust_input_keys_down_recursively(right.clone(), vec![])?
+                    }
+                };
+
+                Ok(Arc::new(HashJoinExec::try_new(
+                    new_left,
+                    new_right,
+                    on.clone(),
+                    filter.clone(),
+                    join_type,
+                    PartitionMode::CollectLeft,
+                    null_equals_null,
+                )?))
+            }
+        }
+    } else if let Some(CrossJoinExec { left, right, .. }) =
+        plan_any.downcast_ref::<CrossJoinExec>()
+    {
+        // Cross join has no keys; requirements referring only to right-side
+        // columns may still be pushed to the right child.
+        let new_left = adjust_input_keys_down_recursively(left.clone(), vec![])?;
+        let new_right = try_push_required_to_right(
+            parent_required,
+            right.clone(),
+            left.schema().fields().len(),
+        )?;
+        Ok(Arc::new(CrossJoinExec::try_new(new_left, new_right)?))
+    } else if let Some(SortMergeJoinExec {
+        left,
+        right,
+        on,
+        join_type,
+        sort_options,
+        null_equals_null,
+        ..
+    }) = plan_any.downcast_ref::<SortMergeJoinExec>()
+    {
+        let join_key_pairs = extract_join_keys(on);
+        // Same reordering attempt as the partitioned hash join above, but the
+        // per-key sort options must be permuted alongside the keys.
+        if let Some((
+            JoinKeyPairs {
+                left_keys,
+                right_keys,
+            },
+            new_positions,
+        )) = try_reorder(
+            join_key_pairs.clone(),
+            parent_required,
+            &plan.equivalence_properties(),
+        ) {
+            let new_join_on = if !new_positions.is_empty() {
+                new_join_conditions(&left_keys, &right_keys)
+            } else {
+                on.clone()
+            };
+            let new_options = if !new_positions.is_empty() {
+                // Permute sort options to follow the new key order.
+                let mut new_sort_options = vec![];
+                for idx in 0..sort_options.len() {
+                    new_sort_options.push(sort_options[new_positions[idx]])
+                }
+                new_sort_options
+            } else {
+                sort_options.clone()
+            };
+
+            let new_left = adjust_input_keys_down_recursively(left.clone(), left_keys)?;
+            let new_right =
+                adjust_input_keys_down_recursively(right.clone(), right_keys)?;
+
+            Ok(Arc::new(SortMergeJoinExec::try_new(
+                new_left,
+                new_right,
+                new_join_on,
+                *join_type,
+                new_options,
+                *null_equals_null,
+            )?))
+        } else {
+            // No reordering possible: keep keys as-is and push them down.
+            let new_left = adjust_input_keys_down_recursively(
+                left.clone(),
+                join_key_pairs.left_keys,
+            )?;
+            let new_right = adjust_input_keys_down_recursively(
+                right.clone(),
+                join_key_pairs.right_keys,
+            )?;
+            Ok(Arc::new(SortMergeJoinExec::try_new(
+                new_left,
+                new_right,
+                on.clone(),
+                *join_type,
+                sort_options.clone(),
+                *null_equals_null,
+            )?))
+        }
+    } else if let Some(AggregateExec {
+        mode,
+        group_by,
+        aggr_expr,
+        input,
+        input_schema,
+        ..
+    }) = plan_any.downcast_ref::<AggregateExec>()
+    {
+        if parent_required.is_empty() {
+            // No requirement to satisfy: just recurse with an empty requirement.
+            plan.map_children(|plan| adjust_input_keys_down_recursively(plan, vec![]))
+        } else {
+            match mode {
+                AggregateMode::Final => plan.map_children(|plan| {
+                    adjust_input_keys_down_recursively(plan, vec![])
+                }),
+                AggregateMode::FinalPartitioned | AggregateMode::Partial => {
+                    // The aggregate's output columns are its group-by expressions,
+                    // renamed and re-indexed into the output schema.
+                    let out_put_columns = group_by
+                        .expr()
+                        .iter()
+                        .enumerate()
+                        .map(|(index, (_col, name))| Column::new(name, index))
+                        .collect::<Vec<_>>();
+
+                    let out_put_exprs = out_put_columns
+                        .iter()
+                        .map(|c| Arc::new(c.clone()) as Arc<dyn PhysicalExpr>)
+                        .collect::<Vec<_>>();
+
+                    // Check whether the requirements can be satisfied by the Aggregation
+                    // Reordering is pointless/impossible when the lengths differ,
+                    // the output already matches the required order exactly, or
+                    // grouping sets (null exprs) are present.
+                    if parent_required.len() != out_put_exprs.len()
+                        || expr_list_eq_strict_order(&out_put_exprs, &parent_required)
+                        || !group_by.null_expr().is_empty()
+                    {
+                        plan.map_children(|plan| {
+                            adjust_input_keys_down_recursively(plan, vec![])
+                        })
+                    } else {
+                        let new_positions =
+                            expected_expr_positions(&out_put_exprs, &parent_required);
+                        match new_positions {
+                            Some(positions) => {
+                                // Reorder the group-by expressions to the
+                                // parent-required order.
+                                let mut new_group_exprs = vec![];
+                                for idx in positions.into_iter() {
+                                    new_group_exprs.push(group_by.expr()[idx].clone());
+                                }
+                                let new_group_by =
+                                    PhysicalGroupBy::new_single(new_group_exprs);
+                                match mode {
+                                    AggregateMode::FinalPartitioned => {
+                                        let new_input =
+                                            adjust_input_keys_down_recursively(
+                                                input.clone(),
+                                                parent_required,
+                                            )?;
+                                        let new_agg = Arc::new(AggregateExec::try_new(
+                                            AggregateMode::FinalPartitioned,
+                                            new_group_by,
+                                            aggr_expr.clone(),
+                                            new_input,
+                                            input_schema.clone(),
+                                        )?);
+
+                                        // Need to create a new projection to change the expr ordering back
+                                        let mut proj_exprs = out_put_columns
+                                            .iter()
+                                            .map(|col| {
+                                                (
+                                                    Arc::new(Column::new(
+                                                        col.name(),
+                                                        new_agg
+                                                            .schema()
+                                                            .index_of(col.name())
+                                                            .unwrap(),
+                                                    ))
+                                                        as Arc<dyn PhysicalExpr>,
+                                                    col.name().to_owned(),
+                                                )
+                                            })
+                                            .collect::<Vec<_>>();
+                                        // Pass through the remaining (aggregate
+                                        // value) columns unchanged.
+                                        let agg_schema = new_agg.schema();
+                                        let agg_fields = agg_schema.fields();
+                                        for (idx, field) in agg_fields
+                                            .iter()
+                                            .enumerate()
+                                            .skip(out_put_columns.len())
+                                        {
+                                            proj_exprs.push((
+                                                Arc::new(Column::new(
+                                                    field.name().as_str(),
+                                                    idx,
+                                                ))
+                                                    as Arc<dyn PhysicalExpr>,
+                                                field.name().clone(),
+                                            ))
+                                        }
+                                        // TODO merge adjacent Projections if there are
+                                        Ok(Arc::new(ProjectionExec::try_new(
+                                            proj_exprs, new_agg,
+                                        )?))
+                                    }
+                                    AggregateMode::Partial => {
+                                        let new_input =
+                                            adjust_input_keys_down_recursively(
+                                                input.clone(),
+                                                vec![],
+                                            )?;
+                                        Ok(Arc::new(AggregateExec::try_new(
+                                            AggregateMode::Partial,
+                                            new_group_by,
+                                            aggr_expr.clone(),
+                                            new_input,
+                                            input_schema.clone(),
+                                        )?))
+                                    }
+                                    _ => Ok(plan),
+                                }
+                            }
+                            None => plan.map_children(|plan| {
+                                adjust_input_keys_down_recursively(plan, vec![])
+                            }),
+                        }
+                    }
+                }
+            }
+        }
+    } else if let Some(ProjectionExec { expr, .. }) =
+        plan_any.downcast_ref::<ProjectionExec>()
+    {
+        // For Projection, we need to transform the columns to the columns before the Projection
+        // And then to push down the requirements
+        // Construct a mapping from new name to the original Column
+        let mut column_mapping = HashMap::new();
+        for (expression, name) in expr.iter() {
+            if let Some(column) = expression.as_any().downcast_ref::<Column>() {
+                column_mapping.insert(name.clone(), column.clone());
+            };
+        }
+        let new_required: Vec<Arc<dyn PhysicalExpr>> = parent_required
+            .iter()
+            .filter_map(|r| {
+                if let Some(column) = r.as_any().downcast_ref::<Column>() {
+                    column_mapping.get(column.name())
+                } else {
+                    None
+                }
+            })
+            .map(|e| Arc::new(e.clone()) as Arc<dyn PhysicalExpr>)
+            .collect::<Vec<_>>();
+        // Only push down when every required expr mapped to a pre-projection
+        // column; a shorter list means some requirement could not be translated.
+        if new_required.len() == parent_required.len() {
+            plan.map_children(|plan| {
+                adjust_input_keys_down_recursively(plan, new_required.clone())
+            })
+        } else {
+            plan.map_children(|plan| adjust_input_keys_down_recursively(plan, vec![]))
+        }
+    } else if plan_any.downcast_ref::<RepartitionExec>().is_some()
+        || plan_any.downcast_ref::<CoalescePartitionsExec>().is_some()
+    {
+        // Repartition/Coalesce reset partitioning, so requirements stop here.
+        plan.map_children(|plan| adjust_input_keys_down_recursively(plan, vec![]))
+    } else {
+        // Default: assume the operator preserves columns and forward the
+        // requirement to all children unchanged.
+        plan.map_children(|plan| {
+            adjust_input_keys_down_recursively(plan, parent_required.clone())
+        })
+    }
+}
+
+/// Translate `parent_required` (expressed against the join's output schema,
+/// where right-side columns are offset by `left_columns_len`) into the right
+/// child's schema and recurse into the right child with the translated
+/// requirement. If any required expr is not a right-side column, recurse with
+/// an empty requirement instead.
+fn try_push_required_to_right(
+    parent_required: Vec<Arc<dyn PhysicalExpr>>,
+    right: Arc<dyn ExecutionPlan>,
+    left_columns_len: usize,
+) -> Result<Arc<dyn ExecutionPlan>> {
+    let new_required: Vec<Arc<dyn PhysicalExpr>> = parent_required
+        .iter()
+        .filter_map(|r| {
+            if let Some(col) = r.as_any().downcast_ref::<Column>() {
+                if col.index() >= left_columns_len {
+                    // Shift the index back into the right child's schema.
+                    Some(
+                        Arc::new(Column::new(col.name(), col.index() - left_columns_len))
+                            as Arc<dyn PhysicalExpr>,
+                    )
+                } else {
+                    None
+                }
+            } else {
+                None
+            }
+        })
+        .collect::<Vec<_>>();
+
+    // if the parent required are all coming from the right side, the requirements can be pushed down
+    if new_required.len() == parent_required.len() {
+        adjust_input_keys_down_recursively(right.clone(), new_required)
+    } else {
+        adjust_input_keys_down_recursively(right.clone(), vec![])
+    }
+}
+
+/// When the physical planner creates the Joins, the ordering of join keys is from the original query.
+/// That might not match with the output partitioning of the join node's children
+/// This method will try to change the ordering of the join keys to match with the
+/// partitioning of the join nodes' children.
+/// If it can not match with both sides, it will try to match with one, either left side or right side.
+///
+/// Unlike `adjust_input_keys_down_recursively`, this only rewrites the current
+/// node (children are left untouched); it is applied per-node during the
+/// bottom-up transform.
+fn reorder_join_keys_to_inputs(
+    plan: Arc<dyn crate::physical_plan::ExecutionPlan>,
+) -> Arc<dyn crate::physical_plan::ExecutionPlan> {
+    let plan_any = plan.as_any();
+    if let Some(HashJoinExec {
+        left,
+        right,
+        on,
+        filter,
+        join_type,
+        mode,
+        null_equals_null,
+        ..
+    }) = plan_any.downcast_ref::<HashJoinExec>()
+    {
+        match mode {
+            // Only partitioned hash joins depend on children's hash partitioning.
+            PartitionMode::Partitioned => {
+                let join_key_pairs = extract_join_keys(on);
+                if let Some((
+                    JoinKeyPairs {
+                        left_keys,
+                        right_keys,
+                    },
+                    new_positions,
+                )) = reorder_current_join_keys(
+                    join_key_pairs,
+                    Some(left.output_partitioning()),
+                    Some(right.output_partitioning()),
+                    &plan.equivalence_properties(),
+                ) {
+                    // Empty `new_positions` means keys already matched; keep plan.
+                    if !new_positions.is_empty() {
+                        let new_join_on = new_join_conditions(&left_keys, &right_keys);
+                        Arc::new(
+                            HashJoinExec::try_new(
+                                left.clone(),
+                                right.clone(),
+                                new_join_on,
+                                filter.clone(),
+                                join_type,
+                                PartitionMode::Partitioned,
+                                null_equals_null,
+                            )
+                            .unwrap(),
+                        )
+                    } else {
+                        plan
+                    }
+                } else {
+                    plan
+                }
+            }
+            _ => plan,
+        }
+    } else if let Some(SortMergeJoinExec {
+        left,
+        right,
+        on,
+        join_type,
+        sort_options,
+        null_equals_null,
+        ..
+    }) = plan_any.downcast_ref::<SortMergeJoinExec>()
+    {
+        let join_key_pairs = extract_join_keys(on);
+        if let Some((
+            JoinKeyPairs {
+                left_keys,
+                right_keys,
+            },
+            new_positions,
+        )) = reorder_current_join_keys(
+            join_key_pairs,
+            Some(left.output_partitioning()),
+            Some(right.output_partitioning()),
+            &plan.equivalence_properties(),
+        ) {
+            if !new_positions.is_empty() {
+                let new_join_on = new_join_conditions(&left_keys, &right_keys);
+                // Permute the per-key sort options to match the new key order.
+                let mut new_sort_options = vec![];
+                for idx in 0..sort_options.len() {
+                    new_sort_options.push(sort_options[new_positions[idx]])
+                }
+                Arc::new(
+                    SortMergeJoinExec::try_new(
+                        left.clone(),
+                        right.clone(),
+                        new_join_on,
+                        *join_type,
+                        new_sort_options,
+                        *null_equals_null,
+                    )
+                    .unwrap(),
+                )
+            } else {
+                plan
+            }
+        } else {
+            plan
+        }
+    } else {
+        // Non-join operators are left unchanged.
+        plan
+    }
+}
+
+/// Reorder the current join keys ordering based on either left partition or right partition.
+///
+/// Tries the left child's hash partitioning first; if the keys cannot be
+/// reordered to match it, retries against the right child's partitioning.
+/// Returns `None` when neither side has a matching hash partitioning.
+fn reorder_current_join_keys(
+    join_keys: JoinKeyPairs,
+    left_partition: Option<Partitioning>,
+    right_partition: Option<Partitioning>,
+    equivalence_properties: &EquivalenceProperties,
+) -> Option<(JoinKeyPairs, Vec<usize>)> {
+    match (left_partition.clone(), right_partition.clone()) {
+        (Some(Partitioning::Hash(left_exprs, _)), _) => {
+            // Recurse with the left partition cleared so a failed match falls
+            // through to the right-side arm below.
+            try_reorder(join_keys.clone(), left_exprs, equivalence_properties).or_else(
+                || {
+                    reorder_current_join_keys(
+                        join_keys,
+                        None,
+                        right_partition,
+                        equivalence_properties,
+                    )
+                },
+            )
+        }
+        (_, Some(Partitioning::Hash(right_exprs, _))) => {
+            try_reorder(join_keys.clone(), right_exprs, equivalence_properties).or_else(
+                || {
+                    reorder_current_join_keys(
+                        join_keys,
+                        left_partition,
+                        None,
+                        equivalence_properties,
+                    )
+                },
+            )
+        }
+        _ => None,
+    }
+}
+
+fn try_reorder(

Review Comment:
   It's better to add some comments here for the parameters.
   
   - For the top-down case, the `join_keys` use the children's schema, while the `expected` and `equivalence_properties` use the current operator's schema.
   - For the bottom-up case, the `join_keys` and `expected` use the children's schema, while the `equivalence_properties` use the current operator's schema.
   
   Is it possible to make the schema the same for all of the parameters? Otherwise, for the right side of the bottom-up case, it may miss some reorder chance.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscribe@arrow.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org