You are viewing a plain text version of this content. The canonical link for it (lost in the plain-text conversion of this archive page) points to the commit in the repository listed below, https://gitbox.apache.org/repos/asf/arrow-datafusion.git, commit 96512ac05df40deb42632992cece656842d6debb.
Posted to commits@arrow.apache.org by al...@apache.org on 2022/11/13 10:56:50 UTC

[arrow-datafusion] branch master updated: fix clippy (#4190)

This is an automated email from the ASF dual-hosted git repository.

alamb pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow-datafusion.git


The following commit(s) were added to refs/heads/master by this push:
     new 96512ac05 fix clippy (#4190)
96512ac05 is described below

commit 96512ac05df40deb42632992cece656842d6debb
Author: jakevin <ja...@gmail.com>
AuthorDate: Sun Nov 13 18:56:43 2022 +0800

    fix clippy (#4190)
    
    * fix clippy
    
    * fix more
    
    * fix fmt
---
 benchmarks/src/bin/tpch.rs                                     |  2 +-
 datafusion/core/benches/data_utils/mod.rs                      |  2 +-
 datafusion/core/benches/filter_query_sql.rs                    |  4 ++--
 datafusion/core/benches/math_query_sql.rs                      |  4 ++--
 datafusion/core/src/catalog/information_schema.rs              |  2 +-
 datafusion/core/src/datasource/listing/url.rs                  |  2 +-
 datafusion/core/src/datasource/mod.rs                          |  6 +++---
 datafusion/core/src/execution/context.rs                       |  4 ++--
 datafusion/core/src/physical_plan/common.rs                    |  4 ++--
 datafusion/core/src/physical_plan/file_format/csv.rs           |  4 ++--
 datafusion/core/src/physical_plan/file_format/json.rs          |  2 +-
 datafusion/core/src/physical_plan/file_format/parquet.rs       |  4 ++--
 .../core/src/physical_plan/file_format/parquet/row_filter.rs   |  6 +++---
 datafusion/core/src/test/mod.rs                                |  4 ++--
 datafusion/core/tests/path_partition.rs                        |  2 +-
 datafusion/core/tests/provider_filter_pushdown.rs              |  2 +-
 datafusion/core/tests/sql/aggregates.rs                        |  2 +-
 datafusion/core/tests/sql/group_by.rs                          |  2 +-
 datafusion/core/tests/sql/mod.rs                               |  2 +-
 datafusion/core/tests/sql/parquet_schema.rs                    |  2 +-
 datafusion/core/tests/sql/partitioned_csv.rs                   |  2 +-
 datafusion/optimizer/src/common_subexpr_eliminate.rs           |  2 +-
 datafusion/optimizer/src/filter_push_down.rs                   |  2 +-
 .../physical-expr/src/aggregate/approx_percentile_cont.rs      |  4 ++--
 datafusion/physical-expr/src/aggregate/tdigest.rs              |  8 +++-----
 datafusion/physical-expr/src/expressions/binary.rs             | 10 +++++-----
 datafusion/physical-expr/src/expressions/datetime.rs           |  2 +-
 datafusion/sql/src/planner.rs                                  |  6 +++---
 28 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/benchmarks/src/bin/tpch.rs b/benchmarks/src/bin/tpch.rs
index f0977fe32..838709cf0 100644
--- a/benchmarks/src/bin/tpch.rs
+++ b/benchmarks/src/bin/tpch.rs
@@ -247,7 +247,7 @@ async fn benchmark_query(
         }
 
         let elapsed = start.elapsed().as_secs_f64() * 1000.0;
-        millis.push(elapsed as f64);
+        millis.push(elapsed);
         let row_count = result.iter().map(|b| b.num_rows()).sum();
         println!(
             "Query {} iteration {} took {:.1} ms and returned {} rows",
diff --git a/datafusion/core/benches/data_utils/mod.rs b/datafusion/core/benches/data_utils/mod.rs
index 66603fd42..575e1831c 100644
--- a/datafusion/core/benches/data_utils/mod.rs
+++ b/datafusion/core/benches/data_utils/mod.rs
@@ -131,7 +131,7 @@ fn create_record_batch(
         schema,
         vec![
             Arc::new(StringArray::from(keys)),
-            Arc::new(Float32Array::from_slice(&vec![i as f32; batch_size])),
+            Arc::new(Float32Array::from_slice(vec![i as f32; batch_size])),
             Arc::new(Float64Array::from(values)),
             Arc::new(UInt64Array::from(integer_values_wide)),
             Arc::new(UInt64Array::from(integer_values_narrow)),
diff --git a/datafusion/core/benches/filter_query_sql.rs b/datafusion/core/benches/filter_query_sql.rs
index 8dc981700..8b0142078 100644
--- a/datafusion/core/benches/filter_query_sql.rs
+++ b/datafusion/core/benches/filter_query_sql.rs
@@ -49,8 +49,8 @@ fn create_context(array_len: usize, batch_size: usize) -> Result<SessionContext>
             RecordBatch::try_new(
                 schema.clone(),
                 vec![
-                    Arc::new(Float32Array::from_slice(&vec![i as f32; batch_size])),
-                    Arc::new(Float64Array::from_slice(&vec![i as f64; batch_size])),
+                    Arc::new(Float32Array::from_slice(vec![i as f32; batch_size])),
+                    Arc::new(Float64Array::from_slice(vec![i as f64; batch_size])),
                 ],
             )
             .unwrap()
diff --git a/datafusion/core/benches/math_query_sql.rs b/datafusion/core/benches/math_query_sql.rs
index 11b107f02..87c7a363e 100644
--- a/datafusion/core/benches/math_query_sql.rs
+++ b/datafusion/core/benches/math_query_sql.rs
@@ -61,8 +61,8 @@ fn create_context(
             RecordBatch::try_new(
                 schema.clone(),
                 vec![
-                    Arc::new(Float32Array::from_slice(&vec![i as f32; batch_size])),
-                    Arc::new(Float64Array::from_slice(&vec![i as f64; batch_size])),
+                    Arc::new(Float32Array::from_slice(vec![i as f32; batch_size])),
+                    Arc::new(Float64Array::from_slice(vec![i as f64; batch_size])),
                 ],
             )
             .unwrap()
diff --git a/datafusion/core/src/catalog/information_schema.rs b/datafusion/core/src/catalog/information_schema.rs
index eca399e27..957cac53a 100644
--- a/datafusion/core/src/catalog/information_schema.rs
+++ b/datafusion/core/src/catalog/information_schema.rs
@@ -264,7 +264,7 @@ impl SchemaProvider for InformationSchemaProvider {
     }
 
     fn table_exist(&self, name: &str) -> bool {
-        return matches!(name.to_ascii_lowercase().as_str(), TABLES | VIEWS | COLUMNS);
+        matches!(name.to_ascii_lowercase().as_str(), TABLES | VIEWS | COLUMNS)
     }
 }
 
diff --git a/datafusion/core/src/datasource/listing/url.rs b/datafusion/core/src/datasource/listing/url.rs
index d1a527f23..1c1230954 100644
--- a/datafusion/core/src/datasource/listing/url.rs
+++ b/datafusion/core/src/datasource/listing/url.rs
@@ -235,7 +235,7 @@ mod tests {
         let root = std::env::current_dir().unwrap();
         let root = root.to_string_lossy();
 
-        let url = ListingTableUrl::parse(&root).unwrap();
+        let url = ListingTableUrl::parse(root).unwrap();
         let child = url.prefix.child("partition").child("file");
 
         let prefix: Vec<_> = url.strip_prefix(&child).unwrap().collect();
diff --git a/datafusion/core/src/datasource/mod.rs b/datafusion/core/src/datasource/mod.rs
index 0a22fe804..fc3e8f2d2 100644
--- a/datafusion/core/src/datasource/mod.rs
+++ b/datafusion/core/src/datasource/mod.rs
@@ -125,8 +125,8 @@ pub async fn get_statistics_with_limit(
     };
 
     let statistics = Statistics {
-        num_rows: Some(num_rows as usize),
-        total_byte_size: Some(total_byte_size as usize),
+        num_rows: Some(num_rows),
+        total_byte_size: Some(total_byte_size),
         column_statistics: column_stats,
         is_exact,
     };
@@ -167,7 +167,7 @@ fn get_col_stats(
                 None => None,
             };
             ColumnStatistics {
-                null_count: Some(null_counts[i] as usize),
+                null_count: Some(null_counts[i]),
                 max_value,
                 min_value,
                 distinct_count: None,
diff --git a/datafusion/core/src/execution/context.rs b/datafusion/core/src/execution/context.rs
index d3989b5bd..e0d2e0611 100644
--- a/datafusion/core/src/execution/context.rs
+++ b/datafusion/core/src/execution/context.rs
@@ -1630,7 +1630,7 @@ impl SessionState {
             Some(host) => format!("{}://{}", url.scheme(), host),
             None => format!("{}://", url.scheme()),
         };
-        let path = &url.as_str()[authority.len() as usize..];
+        let path = &url.as_str()[authority.len()..];
         let path = object_store::path::Path::parse(path).expect("Can't parse path");
         let store = ObjectStoreUrl::parse(authority.as_str())
             .expect("Invalid default catalog url");
@@ -2649,7 +2649,7 @@ mod tests {
         // generate a partitioned file
         for partition in 0..partition_count {
             let filename = format!("partition-{}.{}", partition, file_extension);
-            let file_path = tmp_dir.path().join(&filename);
+            let file_path = tmp_dir.path().join(filename);
             let mut file = File::create(file_path)?;
 
             // generate some data
diff --git a/datafusion/core/src/physical_plan/common.rs b/datafusion/core/src/physical_plan/common.rs
index 4c6a624e7..c00c7421e 100644
--- a/datafusion/core/src/physical_plan/common.rs
+++ b/datafusion/core/src/physical_plan/common.rs
@@ -328,8 +328,8 @@ mod tests {
                 RecordBatch::try_new(
                     Arc::clone(&schema),
                     vec![
-                        Arc::new(Float32Array::from_slice(&vec![i as f32; batch_size])),
-                        Arc::new(Float64Array::from_slice(&vec![i as f64; batch_size])),
+                        Arc::new(Float32Array::from_slice(vec![i as f32; batch_size])),
+                        Arc::new(Float64Array::from_slice(vec![i as f64; batch_size])),
                     ],
                 )
                 .unwrap()
diff --git a/datafusion/core/src/physical_plan/file_format/csv.rs b/datafusion/core/src/physical_plan/file_format/csv.rs
index 51180c0f0..faa1401d2 100644
--- a/datafusion/core/src/physical_plan/file_format/csv.rs
+++ b/datafusion/core/src/physical_plan/file_format/csv.rs
@@ -252,7 +252,7 @@ pub async fn plan_to_csv(
             for i in 0..plan.output_partitioning().partition_count() {
                 let plan = plan.clone();
                 let filename = format!("part-{}.csv", i);
-                let path = fs_path.join(&filename);
+                let path = fs_path.join(filename);
                 let file = fs::File::create(path)?;
                 let mut writer = csv::Writer::new(file);
                 let task_ctx = Arc::new(TaskContext::from(state));
@@ -539,7 +539,7 @@ mod tests {
         // generate a partitioned file
         for partition in 0..partition_count {
             let filename = format!("partition-{}.{}", partition, file_extension);
-            let file_path = tmp_dir.path().join(&filename);
+            let file_path = tmp_dir.path().join(filename);
             let mut file = File::create(file_path)?;
 
             // generate some data
diff --git a/datafusion/core/src/physical_plan/file_format/json.rs b/datafusion/core/src/physical_plan/file_format/json.rs
index ceb9e7958..409c53639 100644
--- a/datafusion/core/src/physical_plan/file_format/json.rs
+++ b/datafusion/core/src/physical_plan/file_format/json.rs
@@ -215,7 +215,7 @@ pub async fn plan_to_json(
             for i in 0..plan.output_partitioning().partition_count() {
                 let plan = plan.clone();
                 let filename = format!("part-{}.json", i);
-                let path = fs_path.join(&filename);
+                let path = fs_path.join(filename);
                 let file = fs::File::create(path)?;
                 let mut writer = json::LineDelimitedWriter::new(file);
                 let task_ctx = Arc::new(TaskContext::from(state));
diff --git a/datafusion/core/src/physical_plan/file_format/parquet.rs b/datafusion/core/src/physical_plan/file_format/parquet.rs
index cf8763b07..7a44749c8 100644
--- a/datafusion/core/src/physical_plan/file_format/parquet.rs
+++ b/datafusion/core/src/physical_plan/file_format/parquet.rs
@@ -601,7 +601,7 @@ pub async fn plan_to_parquet(
             for i in 0..plan.output_partitioning().partition_count() {
                 let plan = plan.clone();
                 let filename = format!("part-{}.parquet", i);
-                let path = fs_path.join(&filename);
+                let path = fs_path.join(filename);
                 let file = fs::File::create(path)?;
                 let mut writer =
                     ArrowWriter::try_new(file, plan.schema(), writer_properties.clone())?;
@@ -1535,7 +1535,7 @@ mod tests {
         // generate a partitioned file
         for partition in 0..partition_count {
             let filename = format!("partition-{}.{}", partition, file_extension);
-            let file_path = tmp_dir.path().join(&filename);
+            let file_path = tmp_dir.path().join(filename);
             let mut file = File::create(file_path)?;
 
             // generate some data
diff --git a/datafusion/core/src/physical_plan/file_format/parquet/row_filter.rs b/datafusion/core/src/physical_plan/file_format/parquet/row_filter.rs
index e3cdc5919..353162c7f 100644
--- a/datafusion/core/src/physical_plan/file_format/parquet/row_filter.rs
+++ b/datafusion/core/src/physical_plan/file_format/parquet/row_filter.rs
@@ -399,7 +399,7 @@ mod test {
     #[should_panic(expected = "building candidate failed")]
     fn test_filter_candidate_builder_ignore_projected_columns() {
         let testdata = crate::test_util::parquet_test_data();
-        let file = std::fs::File::open(&format!("{}/alltypes_plain.parquet", testdata))
+        let file = std::fs::File::open(format!("{}/alltypes_plain.parquet", testdata))
             .expect("opening file");
 
         let reader = SerializedFileReader::new(file).expect("creating reader");
@@ -423,7 +423,7 @@ mod test {
     #[test]
     fn test_filter_candidate_builder_ignore_complex_types() {
         let testdata = crate::test_util::parquet_test_data();
-        let file = std::fs::File::open(&format!("{}/list_columns.parquet", testdata))
+        let file = std::fs::File::open(format!("{}/list_columns.parquet", testdata))
             .expect("opening file");
 
         let reader = SerializedFileReader::new(file).expect("creating reader");
@@ -447,7 +447,7 @@ mod test {
     #[test]
     fn test_filter_candidate_builder_rewrite_missing_column() {
         let testdata = crate::test_util::parquet_test_data();
-        let file = std::fs::File::open(&format!("{}/alltypes_plain.parquet", testdata))
+        let file = std::fs::File::open(format!("{}/alltypes_plain.parquet", testdata))
             .expect("opening file");
 
         let reader = SerializedFileReader::new(file).expect("creating reader");
diff --git a/datafusion/core/src/test/mod.rs b/datafusion/core/src/test/mod.rs
index f5c37eb05..be27b2596 100644
--- a/datafusion/core/src/test/mod.rs
+++ b/datafusion/core/src/test/mod.rs
@@ -106,7 +106,7 @@ pub fn partitioned_file_groups(
                 .get_ext_with_compression(file_compression_type.to_owned())
                 .unwrap()
         );
-        let filename = tmp_dir.join(&filename);
+        let filename = tmp_dir.join(filename);
 
         let file = File::create(&filename).unwrap();
 
@@ -125,7 +125,7 @@ pub fn partitioned_file_groups(
         files.push(filename);
     }
 
-    let f = File::open(&path)?;
+    let f = File::open(path)?;
     let f = BufReader::new(f);
     for (i, line) in f.lines().enumerate() {
         let line = line.unwrap();
diff --git a/datafusion/core/tests/path_partition.rs b/datafusion/core/tests/path_partition.rs
index fca9b9a43..2d7e783f1 100644
--- a/datafusion/core/tests/path_partition.rs
+++ b/datafusion/core/tests/path_partition.rs
@@ -548,7 +548,7 @@ impl ObjectStore for MirroringObjectStore {
     ) -> object_store::Result<Bytes> {
         self.files.iter().find(|x| *x == location.as_ref()).unwrap();
         let path = std::path::PathBuf::from(&self.mirrored_file);
-        let mut file = File::open(&path).unwrap();
+        let mut file = File::open(path).unwrap();
         file.seek(SeekFrom::Start(range.start as u64)).unwrap();
 
         let to_read = range.end - range.start;
diff --git a/datafusion/core/tests/provider_filter_pushdown.rs b/datafusion/core/tests/provider_filter_pushdown.rs
index c1aa5ad70..84c8c50cd 100644
--- a/datafusion/core/tests/provider_filter_pushdown.rs
+++ b/datafusion/core/tests/provider_filter_pushdown.rs
@@ -152,7 +152,7 @@ impl TableProvider for CustomProvider {
                     Expr::Literal(ScalarValue::Int8(Some(i))) => *i as i64,
                     Expr::Literal(ScalarValue::Int16(Some(i))) => *i as i64,
                     Expr::Literal(ScalarValue::Int32(Some(i))) => *i as i64,
-                    Expr::Literal(ScalarValue::Int64(Some(i))) => *i as i64,
+                    Expr::Literal(ScalarValue::Int64(Some(i))) => *i,
                     Expr::Cast(Cast { expr, data_type: _ }) => match expr.deref() {
                         Expr::Literal(lit_value) => match lit_value {
                             ScalarValue::Int8(Some(v)) => *v as i64,
diff --git a/datafusion/core/tests/sql/aggregates.rs b/datafusion/core/tests/sql/aggregates.rs
index 8f266a29f..4b8a158fb 100644
--- a/datafusion/core/tests/sql/aggregates.rs
+++ b/datafusion/core/tests/sql/aggregates.rs
@@ -2185,7 +2185,7 @@ async fn run_count_distinct_integers_aggregated_scenario(
 
     for (i, partition) in partitions.iter().enumerate() {
         let filename = format!("partition-{}.csv", i);
-        let file_path = tmp_dir.path().join(&filename);
+        let file_path = tmp_dir.path().join(filename);
         let mut file = File::create(file_path)?;
         for row in partition {
             let row_str = format!(
diff --git a/datafusion/core/tests/sql/group_by.rs b/datafusion/core/tests/sql/group_by.rs
index 057673dda..56044862c 100644
--- a/datafusion/core/tests/sql/group_by.rs
+++ b/datafusion/core/tests/sql/group_by.rs
@@ -501,7 +501,7 @@ async fn group_by_date_trunc() -> Result<()> {
     // generate a partitioned file
     for partition in 0..4 {
         let filename = format!("partition-{}.{}", partition, "csv");
-        let file_path = tmp_dir.path().join(&filename);
+        let file_path = tmp_dir.path().join(filename);
         let mut file = File::create(file_path)?;
 
         // generate some data
diff --git a/datafusion/core/tests/sql/mod.rs b/datafusion/core/tests/sql/mod.rs
index c2d5726e2..20d5371f1 100644
--- a/datafusion/core/tests/sql/mod.rs
+++ b/datafusion/core/tests/sql/mod.rs
@@ -871,7 +871,7 @@ fn populate_csv_partitions(
     // generate a partitioned file
     for partition in 0..partition_count {
         let filename = format!("partition-{}.{}", partition, file_extension);
-        let file_path = tmp_dir.path().join(&filename);
+        let file_path = tmp_dir.path().join(filename);
         let mut file = File::create(file_path)?;
 
         // generate some data
diff --git a/datafusion/core/tests/sql/parquet_schema.rs b/datafusion/core/tests/sql/parquet_schema.rs
index 17bd88a24..9c56ab147 100644
--- a/datafusion/core/tests/sql/parquet_schema.rs
+++ b/datafusion/core/tests/sql/parquet_schema.rs
@@ -186,7 +186,7 @@ fn write_files(table_path: &Path, schemas: Vec<Schema>) {
     for (i, schema) in schemas.into_iter().enumerate() {
         let schema = Arc::new(schema);
         let filename = format!("part-{}.parquet", i);
-        let path = table_path.join(&filename);
+        let path = table_path.join(filename);
         let file = fs::File::create(path).unwrap();
         let mut writer = ArrowWriter::try_new(file, schema.clone(), None).unwrap();
 
diff --git a/datafusion/core/tests/sql/partitioned_csv.rs b/datafusion/core/tests/sql/partitioned_csv.rs
index 9b87c2f09..efb63a296 100644
--- a/datafusion/core/tests/sql/partitioned_csv.rs
+++ b/datafusion/core/tests/sql/partitioned_csv.rs
@@ -60,7 +60,7 @@ fn populate_csv_partitions(
     // generate a partitioned file
     for partition in 0..partition_count {
         let filename = format!("partition-{}.{}", partition, file_extension);
-        let file_path = tmp_dir.path().join(&filename);
+        let file_path = tmp_dir.path().join(filename);
         let mut file = std::fs::File::create(file_path)?;
 
         // generate some data
diff --git a/datafusion/optimizer/src/common_subexpr_eliminate.rs b/datafusion/optimizer/src/common_subexpr_eliminate.rs
index e8158e632..8369c12c3 100644
--- a/datafusion/optimizer/src/common_subexpr_eliminate.rs
+++ b/datafusion/optimizer/src/common_subexpr_eliminate.rs
@@ -543,7 +543,7 @@ impl ExprRewriter for CommonSubexprRewriter<'_> {
         // Alias this `Column` expr to it original "expr name",
         // `projection_push_down` optimizer use "expr name" to eliminate useless
         // projections.
-        Ok(col(id).alias(&expr_name))
+        Ok(col(id).alias(expr_name))
     }
 }
 
diff --git a/datafusion/optimizer/src/filter_push_down.rs b/datafusion/optimizer/src/filter_push_down.rs
index 674910cd1..71de72732 100644
--- a/datafusion/optimizer/src/filter_push_down.rs
+++ b/datafusion/optimizer/src/filter_push_down.rs
@@ -716,7 +716,7 @@ fn optimize(plan: &LogicalPlan, mut state: State) -> Result<LogicalPlan> {
                             // replace keys in join_cols_to_replace with values in resulting column
                             // set
                             .filter(|c| !join_cols_to_replace.contains_key(c))
-                            .chain(join_cols_to_replace.iter().map(|(_, v)| (*v).clone()))
+                            .chain(join_cols_to_replace.values().map(|v| (*v).clone()))
                             .collect();
 
                         Some(Ok((join_side_predicate, join_side_columns)))
diff --git a/datafusion/physical-expr/src/aggregate/approx_percentile_cont.rs b/datafusion/physical-expr/src/aggregate/approx_percentile_cont.rs
index e9f9f0721..750675719 100644
--- a/datafusion/physical-expr/src/aggregate/approx_percentile_cont.rs
+++ b/datafusion/physical-expr/src/aggregate/approx_percentile_cont.rs
@@ -128,7 +128,7 @@ fn validate_input_percentile_expr(expr: &Arc<dyn PhysicalExpr>) -> Result<f64> {
         .value();
     let percentile = match lit {
         ScalarValue::Float32(Some(q)) => *q as f64,
-        ScalarValue::Float64(Some(q)) => *q as f64,
+        ScalarValue::Float64(Some(q)) => *q,
         got => return Err(DataFusionError::NotImplemented(format!(
             "Percentile value for 'APPROX_PERCENTILE_CONT' must be Float32 or Float64 literal (got data type {})",
             got.get_datatype()
@@ -388,7 +388,7 @@ impl Accumulator for ApproxPercentileAccumulator {
             DataType::UInt32 => ScalarValue::UInt32(Some(q as u32)),
             DataType::UInt64 => ScalarValue::UInt64(Some(q as u64)),
             DataType::Float32 => ScalarValue::Float32(Some(q as f32)),
-            DataType::Float64 => ScalarValue::Float64(Some(q as f64)),
+            DataType::Float64 => ScalarValue::Float64(Some(q)),
             v => unreachable!("unexpected return type {:?}", v),
         })
     }
diff --git a/datafusion/physical-expr/src/aggregate/tdigest.rs b/datafusion/physical-expr/src/aggregate/tdigest.rs
index e4112d848..6457f7025 100644
--- a/datafusion/physical-expr/src/aggregate/tdigest.rs
+++ b/datafusion/physical-expr/src/aggregate/tdigest.rs
@@ -420,8 +420,7 @@ impl TDigest {
         let mut compressed: Vec<Centroid> = Vec::with_capacity(max_size);
 
         let mut k_limit: f64 = 1.0;
-        let mut q_limit_times_count =
-            Self::k_to_q(k_limit, max_size as f64) * (count as f64);
+        let mut q_limit_times_count = Self::k_to_q(k_limit, max_size as f64) * (count);
 
         let mut iter_centroids = centroids.iter_mut();
         let mut curr = iter_centroids.next().unwrap();
@@ -440,8 +439,7 @@ impl TDigest {
                 sums_to_merge = 0_f64;
                 weights_to_merge = 0_f64;
                 compressed.push(curr.clone());
-                q_limit_times_count =
-                    Self::k_to_q(k_limit, max_size as f64) * (count as f64);
+                q_limit_times_count = Self::k_to_q(k_limit, max_size as f64) * (count);
                 k_limit += 1.0;
                 curr = centroid;
             }
@@ -452,7 +450,7 @@ impl TDigest {
         compressed.shrink_to_fit();
         compressed.sort();
 
-        result.count = count as f64;
+        result.count = count;
         result.min = min;
         result.max = max;
         result.centroids = compressed;
diff --git a/datafusion/physical-expr/src/expressions/binary.rs b/datafusion/physical-expr/src/expressions/binary.rs
index 88dcaaccc..a49040530 100644
--- a/datafusion/physical-expr/src/expressions/binary.rs
+++ b/datafusion/physical-expr/src/expressions/binary.rs
@@ -2437,10 +2437,10 @@ mod tests {
         let value: i128 = 123;
         let decimal_array = Arc::new(create_decimal_array(
             &[
-                Some(value as i128), // 1.23
+                Some(value), // 1.23
                 None,
                 Some((value - 1) as i128), // 1.22
-                Some((value + 1) as i128), // 1.24
+                Some(value + 1),           // 1.24
             ],
             10,
             2,
@@ -2563,7 +2563,7 @@ mod tests {
             &[
                 Some(value as i128), // 1.23
                 None,
-                Some((value - 1) as i128), // 1.22
+                Some(value - 1),           // 1.22
                 Some((value + 1) as i128), // 1.24
             ],
             10,
@@ -2680,10 +2680,10 @@ mod tests {
         let value: i128 = 123;
         let decimal_array = Arc::new(create_decimal_array(
             &[
-                Some(value as i128), // 1.23
+                Some(value), // 1.23
                 None,
                 Some((value - 1) as i128), // 1.22
-                Some((value + 1) as i128), // 1.24
+                Some(value + 1),           // 1.24
             ],
             10,
             2,
diff --git a/datafusion/physical-expr/src/expressions/datetime.rs b/datafusion/physical-expr/src/expressions/datetime.rs
index 9585e7ea0..14e707259 100644
--- a/datafusion/physical-expr/src/expressions/datetime.rs
+++ b/datafusion/physical-expr/src/expressions/datetime.rs
@@ -373,7 +373,7 @@ mod tests {
         match res {
             ColumnarValue::Scalar(ScalarValue::Date64(Some(d))) => {
                 let epoch = NaiveDate::from_ymd_opt(1970, 1, 1).unwrap();
-                let res = epoch.add(Duration::milliseconds(d as i64));
+                let res = epoch.add(Duration::milliseconds(d));
                 assert_eq!(format!("{:?}", res).as_str(), "1969-12-16");
             }
             _ => Err(DataFusionError::NotImplemented(
diff --git a/datafusion/sql/src/planner.rs b/datafusion/sql/src/planner.rs
index dacd4af87..0fe8042e0 100644
--- a/datafusion/sql/src/planner.rs
+++ b/datafusion/sql/src/planner.rs
@@ -209,7 +209,7 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
                         .project(
                             plan.schema().fields().iter().zip(columns.into_iter()).map(
                                 |(field, ident)| {
-                                    col(field.name()).alias(&normalize_ident(&ident))
+                                    col(field.name()).alias(normalize_ident(&ident))
                                 },
                             ),
                         )
@@ -717,7 +717,7 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
             JoinConstraint::Using(idents) => {
                 let keys: Vec<Column> = idents
                     .into_iter()
-                    .map(|x| Column::from_name(&normalize_ident(&x)))
+                    .map(|x| Column::from_name(normalize_ident(&x)))
                     .collect();
                 LogicalPlanBuilder::from(left)
                     .join_using(&right, join_type, keys)?
@@ -840,7 +840,7 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
             Ok(LogicalPlanBuilder::from(plan.clone())
                 .project_with_alias(
                     plan.schema().fields().iter().zip(columns_alias.iter()).map(
-                        |(field, ident)| col(field.name()).alias(&normalize_ident(ident)),
+                        |(field, ident)| col(field.name()).alias(normalize_ident(ident)),
                     ),
                     Some(normalize_ident(&alias.name)),
                 )?