Posted to commits@arrow.apache.org by yj...@apache.org on 2022/04/07 02:46:52 UTC
[arrow-datafusion] branch maint-7.x updated: 7.x: fix clippy (#2166)
This is an automated email from the ASF dual-hosted git repository.
yjshen pushed a commit to branch maint-7.x
in repository https://gitbox.apache.org/repos/asf/arrow-datafusion.git
The following commit(s) were added to refs/heads/maint-7.x by this push:
new 5aa689fc4 7.x: fix clippy (#2166)
5aa689fc4 is described below
commit 5aa689fc4c3d4e6b9a062f4d74dcfe2d090896e1
Author: Rich <jy...@users.noreply.github.com>
AuthorDate: Wed Apr 6 22:46:47 2022 -0400
7.x: fix clippy (#2166)
* fix clippy
* fix if else
* fix clippy
* fix clippy
* fix clippy
* fix clippy
---
ballista/rust/core/src/serde/logical_plan/to_proto.rs | 2 +-
datafusion/src/execution/context.rs | 6 +++---
datafusion/src/logical_plan/builder.rs | 3 +--
datafusion/src/logical_plan/plan.rs | 3 +--
datafusion/src/optimizer/filter_push_down.rs | 3 +--
datafusion/src/physical_plan/hash_aggregate.rs | 9 ++++-----
datafusion/src/physical_plan/regex_expressions.rs | 2 +-
datafusion/src/physical_plan/union.rs | 6 ++----
datafusion/src/physical_plan/values.rs | 2 +-
datafusion/tests/merge_fuzz.rs | 3 +--
datafusion/tests/order_spill_fuzz.rs | 3 +--
11 files changed, 17 insertions(+), 25 deletions(-)
diff --git a/ballista/rust/core/src/serde/logical_plan/to_proto.rs b/ballista/rust/core/src/serde/logical_plan/to_proto.rs
index 16f19b0f4..9cb8c4166 100644
--- a/ballista/rust/core/src/serde/logical_plan/to_proto.rs
+++ b/ballista/rust/core/src/serde/logical_plan/to_proto.rs
@@ -198,7 +198,7 @@ impl From<&DataType> for protobuf::arrow_type::ArrowTypeEnum {
DataType::Timestamp(time_unit, timezone) => {
ArrowTypeEnum::Timestamp(protobuf::Timestamp {
time_unit: protobuf::TimeUnit::from_arrow_time_unit(time_unit) as i32,
- timezone: timezone.to_owned().unwrap_or_else(String::new),
+ timezone: timezone.to_owned().unwrap_or_default(),
})
}
DataType::Date32 => ArrowTypeEnum::Date32(EmptyMessage {}),
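The hunk above swaps unwrap_or_else(String::new) for unwrap_or_default(), which is what clippy suggests (most likely via its unwrap_or_else_default lint) when the closure passed to unwrap_or_else is just a type's Default constructor. A minimal standalone sketch of the pattern, with illustrative names rather than the actual DataFusion types:

    fn timezone_or_empty(timezone: Option<String>) -> String {
        // Before: timezone.unwrap_or_else(String::new)
        // After: unwrap_or_default() falls back to String::default(), i.e. "".
        timezone.unwrap_or_default()
    }

    fn main() {
        assert_eq!(timezone_or_empty(Some("UTC".to_owned())), "UTC");
        assert_eq!(timezone_or_empty(None), "");
    }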
diff --git a/datafusion/src/execution/context.rs b/datafusion/src/execution/context.rs
index 393e4afdf..09b386ed6 100644
--- a/datafusion/src/execution/context.rs
+++ b/datafusion/src/execution/context.rs
@@ -1151,7 +1151,7 @@ impl ExecutionProps {
var_type: VarType,
provider: Arc<dyn VarProvider + Send + Sync>,
) -> Option<Arc<dyn VarProvider + Send + Sync>> {
- let mut var_providers = self.var_providers.take().unwrap_or_else(HashMap::new);
+ let mut var_providers = self.var_providers.take().unwrap_or_default();
let old_provider = var_providers.insert(var_type, provider);
@@ -3602,7 +3602,7 @@ mod tests {
let logical_plan = ctx.create_logical_plan(sql)?;
let logical_plan = ctx.optimize(&logical_plan)?;
let physical_plan = ctx.create_physical_plan(&logical_plan).await?;
- ctx.write_csv(physical_plan, out_dir.to_string()).await
+ ctx.write_csv(physical_plan, out_dir).await
}
/// Execute SQL and write results to partitioned parquet files
@@ -3615,7 +3615,7 @@ mod tests {
let logical_plan = ctx.create_logical_plan(sql)?;
let logical_plan = ctx.optimize(&logical_plan)?;
let physical_plan = ctx.create_physical_plan(&logical_plan).await?;
- ctx.write_parquet(physical_plan, out_dir.to_string(), writer_properties)
+ ctx.write_parquet(physical_plan, out_dir, writer_properties)
.await
}
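The first context.rs hunk is the same unwrap_or_default rewrite as above, applied to a HashMap. The two test hunks drop a .to_string() call on out_dir, the shape of clippy's unnecessary_to_owned warning: an owned String is allocated only to be handed to a parameter that works with the borrowed form. A hedged sketch, assuming a path parameter that accepts anything string-like (write_csv_like is a stand-in, not the real DataFusion signature):

    // write_csv_like stands in for an API that accepts any string-like path;
    // the real DataFusion write_csv signature may differ.
    fn write_csv_like(path: impl AsRef<str>) {
        println!("writing csv output to {}", path.as_ref());
    }

    fn main() {
        let out_dir = "/tmp/csv_out";
        // Before: write_csv_like(out_dir.to_string()) allocated a String for no benefit.
        // After: pass the &str through directly.
        write_csv_like(out_dir);
    }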
diff --git a/datafusion/src/logical_plan/builder.rs b/datafusion/src/logical_plan/builder.rs
index 0144b7516..086d331f7 100644
--- a/datafusion/src/logical_plan/builder.rs
+++ b/datafusion/src/logical_plan/builder.rs
@@ -1041,14 +1041,13 @@ pub(crate) fn expand_wildcard(
let columns_to_skip = using_columns
.into_iter()
// For each USING JOIN condition, only expand to one column in projection
- .map(|cols| {
+ .flat_map(|cols| {
let mut cols = cols.into_iter().collect::<Vec<_>>();
// sort join columns to make sure we consistently keep the same
// qualified column
cols.sort();
cols.into_iter().skip(1)
})
- .flatten()
.collect::<HashSet<_>>();
if columns_to_skip.is_empty() {
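This is clippy's map_flatten lint: a .map(...) adapter followed immediately by .flatten() collapses into a single .flat_map(...). The same rewrite recurs below in plan.rs, filter_push_down.rs, merge_fuzz.rs, and order_spill_fuzz.rs. A small self-contained example of the pattern:

    fn main() {
        let groups = vec![vec![3, 1, 2], vec![6, 5]];
        // Before (clippy::map_flatten):
        //     groups.iter().map(|g| g.iter().skip(1)).flatten()
        // After: flat_map expresses the map-then-flatten in one adapter.
        let skipped: Vec<i32> = groups
            .iter()
            .flat_map(|g| g.iter().skip(1))
            .copied()
            .collect();
        assert_eq!(skipped, vec![1, 2, 5]);
    }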
diff --git a/datafusion/src/logical_plan/plan.rs b/datafusion/src/logical_plan/plan.rs
index 3d49e5484..64907302e 100644
--- a/datafusion/src/logical_plan/plan.rs
+++ b/datafusion/src/logical_plan/plan.rs
@@ -543,8 +543,7 @@ impl LogicalPlan {
{
self.using_columns.push(
on.iter()
- .map(|entry| [&entry.0, &entry.1])
- .flatten()
+ .flat_map(|entry| [&entry.0, &entry.1])
.cloned()
.collect::<HashSet<Column>>(),
);
diff --git a/datafusion/src/optimizer/filter_push_down.rs b/datafusion/src/optimizer/filter_push_down.rs
index 78911313e..d8e43ed21 100644
--- a/datafusion/src/optimizer/filter_push_down.rs
+++ b/datafusion/src/optimizer/filter_push_down.rs
@@ -228,14 +228,13 @@ fn get_pushable_join_predicates<'a>(
let schema_columns = schema
.fields()
.iter()
- .map(|f| {
+ .flat_map(|f| {
[
f.qualified_column(),
// we need to push down filter using unqualified column as well
f.unqualified_column(),
]
})
- .flatten()
.collect::<HashSet<_>>();
state
diff --git a/datafusion/src/physical_plan/hash_aggregate.rs b/datafusion/src/physical_plan/hash_aggregate.rs
index b727cdd2e..d2b27b845 100644
--- a/datafusion/src/physical_plan/hash_aggregate.rs
+++ b/datafusion/src/physical_plan/hash_aggregate.rs
@@ -1176,12 +1176,11 @@ mod tests {
_partition: usize,
_runtime: Arc<RuntimeEnv>,
) -> Result<SendableRecordBatchStream> {
- let stream;
- if self.yield_first {
- stream = TestYieldingStream::New;
+ let stream = if self.yield_first {
+ TestYieldingStream::New
} else {
- stream = TestYieldingStream::Yielded;
- }
+ TestYieldingStream::Yielded
+ };
Ok(Box::pin(stream))
}
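The "fix if else" bullet in the commit message corresponds to this hunk: instead of declaring stream first and assigning it in each branch, the if/else is used as an expression and bound directly, which is the shape clippy's needless_late_init lint flags. A tiny sketch with illustrative values:

    fn main() {
        let yield_first = true;
        // Before:
        //     let label;
        //     if yield_first { label = "new"; } else { label = "yielded"; }
        // After: bind the value of the if/else expression in one step.
        let label = if yield_first { "new" } else { "yielded" };
        assert_eq!(label, "new");
    }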
diff --git a/datafusion/src/physical_plan/regex_expressions.rs b/datafusion/src/physical_plan/regex_expressions.rs
index 487e1f14c..cf997c032 100644
--- a/datafusion/src/physical_plan/regex_expressions.rs
+++ b/datafusion/src/physical_plan/regex_expressions.rs
@@ -139,7 +139,7 @@ pub fn regexp_replace<T: StringOffsetSizeTrait>(args: &[ArrayRef]) -> Result<Arr
let (pattern, replace_all) = if flags == "g" {
(pattern.to_string(), true)
} else if flags.contains('g') {
- (format!("(?{}){}", flags.to_string().replace("g", ""), pattern), true)
+ (format!("(?{}){}", flags.to_string().replace('g', ""), pattern), true)
} else {
(format!("(?{}){}", flags, pattern), false)
};
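Here the pattern passed to str::replace changes from the one-character string "g" to the char 'g'; clippy's single_char_pattern lint prefers the char because it avoids substring matching for a single-byte needle. For example:

    fn main() {
        let flags = "gi";
        // Before (clippy::single_char_pattern): flags.replace("g", "")
        // After: a char pattern does the same work without a &str needle.
        let without_global = flags.replace('g', "");
        assert_eq!(without_global, "i");
    }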
diff --git a/datafusion/src/physical_plan/union.rs b/datafusion/src/physical_plan/union.rs
index 03c41f4e1..48f7b280b 100644
--- a/datafusion/src/physical_plan/union.rs
+++ b/datafusion/src/physical_plan/union.rs
@@ -201,14 +201,12 @@ fn col_stats_union(
.min_value
.zip(right.min_value)
.map(|(a, b)| expressions::helpers::min(&a, &b))
- .map(Result::ok)
- .flatten();
+ .and_then(Result::ok);
left.max_value = left
.max_value
.zip(right.max_value)
.map(|(a, b)| expressions::helpers::max(&a, &b))
- .map(Result::ok)
- .flatten();
+ .and_then(Result::ok);
left.null_count = left.null_count.zip(right.null_count).map(|(a, b)| a + b);
left
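On an Option, .map(Result::ok).flatten() is again the map-then-flatten shape; the single-step form is .and_then(Result::ok), which turns Option<Result<T, E>> into Option<T>. An illustrative example using parse in place of the statistics helpers:

    fn main() {
        let raw: Option<&str> = Some("42");
        // Before: raw.map(|s| s.parse::<i32>()).map(Result::ok).flatten()
        // After: and_then(Result::ok) flattens the Option<Result<i32, _>> in one step.
        let parsed: Option<i32> = raw.map(|s| s.parse::<i32>()).and_then(Result::ok);
        assert_eq!(parsed, Some(42));
    }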
diff --git a/datafusion/src/physical_plan/values.rs b/datafusion/src/physical_plan/values.rs
index da39b3eb4..c65082ef0 100644
--- a/datafusion/src/physical_plan/values.rs
+++ b/datafusion/src/physical_plan/values.rs
@@ -190,7 +190,7 @@ mod tests {
async fn values_empty_case() -> Result<()> {
let schema = test_util::aggr_test_schema();
let empty = ValuesExec::try_new(schema, vec![]);
- assert!(!empty.is_ok());
+ assert!(empty.is_err());
Ok(())
}
}
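The test now asserts empty.is_err() rather than negating is_ok(), which is what clippy suggests for !x.is_ok() (via its boolean-simplification lints). A standalone illustration:

    fn main() {
        let empty: Result<Vec<i32>, String> = Err("no columns".to_owned());
        // Before: assert!(!empty.is_ok());
        // After: is_err() states the intent and avoids the double negation.
        assert!(empty.is_err());
    }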
diff --git a/datafusion/tests/merge_fuzz.rs b/datafusion/tests/merge_fuzz.rs
index 6821c6ba5..d874ec507 100644
--- a/datafusion/tests/merge_fuzz.rs
+++ b/datafusion/tests/merge_fuzz.rs
@@ -104,8 +104,7 @@ async fn run_merge_test(input: Vec<Vec<RecordBatch>>) {
for batch_size in batch_sizes {
let first_batch = input
.iter()
- .map(|p| p.iter())
- .flatten()
+ .flat_map(|p| p.iter())
.next()
.expect("at least one batch");
let schema = first_batch.schema();
diff --git a/datafusion/tests/order_spill_fuzz.rs b/datafusion/tests/order_spill_fuzz.rs
index 049fe6a4f..b1586f06c 100644
--- a/datafusion/tests/order_spill_fuzz.rs
+++ b/datafusion/tests/order_spill_fuzz.rs
@@ -58,8 +58,7 @@ async fn run_sort(pool_size: usize, size_spill: Vec<(usize, bool)>) {
let input = vec![make_staggered_batches(size)];
let first_batch = input
.iter()
- .map(|p| p.iter())
- .flatten()
+ .flat_map(|p| p.iter())
.next()
.expect("at least one batch");
let schema = first_batch.schema();