diff --git a/crates/polars-arrow/src/io/ipc/read/schema.rs b/crates/polars-arrow/src/io/ipc/read/schema.rs
index 3ed84d3005bd..c130a957b7ff 100644
--- a/crates/polars-arrow/src/io/ipc/read/schema.rs
+++ b/crates/polars-arrow/src/io/ipc/read/schema.rs
@@ -363,7 +363,7 @@ pub fn deserialize_schema(
     message: &[u8],
 ) -> PolarsResult<(ArrowSchema, IpcSchema, Option)> {
     let message = arrow_format::ipc::MessageRef::read_as_root(message)
-        .map_err(|_err| polars_err!(oos = "Unable deserialize message: {err:?}"))?;
+        .map_err(|err| polars_err!(oos = format!("Unable deserialize message: {err:?}")))?;

     let schema = match message
         .header()?
@@ -430,7 +430,7 @@ pub(super) fn fb_to_schema(

 pub(super) fn deserialize_stream_metadata(meta: &[u8]) -> PolarsResult {
     let message = arrow_format::ipc::MessageRef::read_as_root(meta)
-        .map_err(|_err| polars_err!(oos = "Unable to get root as message: {err:?}"))?;
+        .map_err(|err| polars_err!(oos = format!("Unable to get root as message: {err:?}")))?;
     let version = message.version()?;
     // message header is a Schema, so read it
     let header = message
diff --git a/crates/polars-core/src/chunked_array/logical/categorical/revmap.rs b/crates/polars-core/src/chunked_array/logical/categorical/revmap.rs
index 2e3d6f2115db..5446a6ab5129 100644
--- a/crates/polars-core/src/chunked_array/logical/categorical/revmap.rs
+++ b/crates/polars-core/src/chunked_array/logical/categorical/revmap.rs
@@ -68,7 +68,7 @@ impl RevMapping {
             val.hash(&mut hb);
         });
         let hash = hb.finish();
-        (hash as u128) << 64 | (categories.total_buffer_len() as u128)
+        ((hash as u128) << 64) | (categories.total_buffer_len() as u128)
     }

     pub fn build_local(categories: Utf8ViewArray) -> Self {
diff --git a/crates/polars-expr/src/expressions/apply.rs b/crates/polars-expr/src/expressions/apply.rs
index 7fc0739f131e..c3242547dfca 100644
--- a/crates/polars-expr/src/expressions/apply.rs
+++ b/crates/polars-expr/src/expressions/apply.rs
@@ -326,15 +326,10 @@ impl PhysicalExpr for ApplyExpr {
                 self.inputs
                     .par_iter()
                     .map(f)
-                    .map(|v| v.map(Column::from))
                     .collect::>>()
             })
         } else {
-            self.inputs
-                .iter()
-                .map(f)
-                .map(|v| v.map(Column::from))
-                .collect::>>()
+            self.inputs.iter().map(f).collect::>>()
         }?;

         if self.allow_rename {
@@ -550,7 +545,6 @@ fn apply_multiple_elementwise<'a>(

                     ac.flat_naive().into_owned()
                 })
-                .map(Column::from)
                 .collect::>();

             let input_len = c[0].len();
diff --git a/crates/polars-expr/src/expressions/column.rs b/crates/polars-expr/src/expressions/column.rs
index 4c730663b339..0cdb3e54bbfd 100644
--- a/crates/polars-expr/src/expressions/column.rs
+++ b/crates/polars-expr/src/expressions/column.rs
@@ -156,12 +156,12 @@ impl PhysicalExpr for ColumnExpr {
             // in debug builds we panic so that it can be fixed when occurring
             None => {
                 if self.name.starts_with(CSE_REPLACED) {
-                    return self.process_cse(df, &self.schema).map(Column::from);
+                    return self.process_cse(df, &self.schema);
                 }
                 self.process_by_linear_search(df, state, true)
             },
         };
-        self.check_external_context(out, state).map(Column::from)
+        self.check_external_context(out, state)
     }

     #[allow(clippy::ptr_arg)]
diff --git a/crates/polars-expr/src/expressions/window.rs b/crates/polars-expr/src/expressions/window.rs
index b799d55467e5..dd92e8318a61 100644
--- a/crates/polars-expr/src/expressions/window.rs
+++ b/crates/polars-expr/src/expressions/window.rs
@@ -406,7 +406,7 @@ impl PhysicalExpr for WindowExpr {
         let group_by_columns = self
             .group_by
             .iter()
-            .map(|e| e.evaluate(df, state).map(Column::from))
+            .map(|e| e.evaluate(df, state))
             .collect::>>()?;

         // if the keys are sorted
@@ -551,7 +551,6 @@ impl PhysicalExpr for WindowExpr {
                     state,
                     &cache_key,
                 )
-                .map(Column::from)
             },
             Join => {
                 let out_column = ac.aggregated();
diff --git a/crates/polars-io/src/partition.rs b/crates/polars-io/src/partition.rs
index ce64a4cb45e7..2e3499fe57aa 100644
--- a/crates/polars-io/src/partition.rs
+++ b/crates/polars-io/src/partition.rs
@@ -39,10 +39,7 @@ fn write_partitioned_dataset_impl(
 where
     W: WriteDataFrameToFile + Send + Sync,
 {
-    let partition_by = partition_by
-        .into_iter()
-        .map(Into::into)
-        .collect::>();
+    let partition_by = partition_by.into_iter().collect::>();

     // Ensure we have a single chunk as the gather will otherwise rechunk per group.
     df.as_single_chunk_par();
diff --git a/crates/polars-mem-engine/src/executors/group_by.rs b/crates/polars-mem-engine/src/executors/group_by.rs
index f7a501424ed9..09dcae659fee 100644
--- a/crates/polars-mem-engine/src/executors/group_by.rs
+++ b/crates/polars-mem-engine/src/executors/group_by.rs
@@ -89,7 +89,7 @@ pub(super) fn group_by_helper(
         rayon::join(get_columns, get_agg)
     });

-    columns.extend(agg_columns?.into_iter().map(Column::from));
+    columns.extend(agg_columns?);

     DataFrame::new(columns)
 }
@@ -98,7 +98,7 @@ impl GroupByExec {
         let keys = self
             .keys
             .iter()
-            .map(|e| e.evaluate(&df, state).map(Column::from))
+            .map(|e| e.evaluate(&df, state))
             .collect::>()?;
         group_by_helper(
             df,
diff --git a/crates/polars-mem-engine/src/executors/group_by_dynamic.rs b/crates/polars-mem-engine/src/executors/group_by_dynamic.rs
index b5f98666d281..2a3fe16c08cd 100644
--- a/crates/polars-mem-engine/src/executors/group_by_dynamic.rs
+++ b/crates/polars-mem-engine/src/executors/group_by_dynamic.rs
@@ -25,7 +25,7 @@ impl GroupByDynamicExec {
         let keys = self
             .keys
             .iter()
-            .map(|e| e.evaluate(&df, state).map(Column::from))
+            .map(|e| e.evaluate(&df, state))
             .collect::>>()?;

         let (mut time_key, mut keys, groups) = df.group_by_dynamic(keys, &self.options)?;
@@ -63,7 +63,7 @@ impl GroupByDynamicExec {
         let mut columns = Vec::with_capacity(agg_columns.len() + 1 + keys.len());
         columns.extend_from_slice(&keys);
         columns.push(time_key);
-        columns.extend(agg_columns.into_iter().map(Column::from));
+        columns.extend(agg_columns);

         DataFrame::new(columns)
     }
diff --git a/crates/polars-mem-engine/src/executors/group_by_partitioned.rs b/crates/polars-mem-engine/src/executors/group_by_partitioned.rs
index 61cb9b10bc52..ad5d647fdb33 100644
--- a/crates/polars-mem-engine/src/executors/group_by_partitioned.rs
+++ b/crates/polars-mem-engine/src/executors/group_by_partitioned.rs
@@ -58,9 +58,7 @@ fn compute_keys(
     df: &DataFrame,
     state: &ExecutionState,
 ) -> PolarsResult> {
-    keys.iter()
-        .map(|s| s.evaluate(df, state).map(Column::from))
-        .collect()
+    keys.iter().map(|s| s.evaluate(df, state)).collect()
 }

 fn run_partitions(
@@ -154,7 +152,6 @@ fn estimate_unique_count(keys: &[Column], mut sample_size: usize) -> PolarsResult {
     let keys = keys
         .iter()
         .map(|s| s.slice(offset, sample_size))
-        .map(Column::from)
         .collect::>();
     let df = unsafe { DataFrame::new_no_checks_height_from_first(keys) };
     let names = df.get_column_names().into_iter().cloned();
@@ -331,9 +328,7 @@ impl PartitionGroupByExec {
             .zip(&df.get_columns()[self.phys_keys.len()..])
             .map(|(expr, partitioned_s)| {
                 let agg_expr = expr.as_partitioned_aggregator().unwrap();
-                agg_expr
-                    .finalize(partitioned_s.clone(), groups, state)
-                    .map(Column::from)
+                agg_expr.finalize(partitioned_s.clone(), groups, state)
             })
             .collect();

diff --git a/crates/polars-mem-engine/src/executors/group_by_rolling.rs b/crates/polars-mem-engine/src/executors/group_by_rolling.rs
index 8ad2352572a0..5d9068f13de7 100644
--- a/crates/polars-mem-engine/src/executors/group_by_rolling.rs
+++ b/crates/polars-mem-engine/src/executors/group_by_rolling.rs
@@ -48,7 +48,7 @@ impl GroupByRollingExec {
         let keys = self
             .keys
             .iter()
-            .map(|e| e.evaluate(&df, state).map(Column::from))
+            .map(|e| e.evaluate(&df, state))
             .collect::>>()?;

         let (mut time_key, mut keys, groups) = df.rolling(keys, &self.options)?;
@@ -85,7 +85,7 @@ impl GroupByRollingExec {
         let mut columns = Vec::with_capacity(agg_columns.len() + 1 + keys.len());
         columns.extend_from_slice(&keys);
         columns.push(time_key);
-        columns.extend(agg_columns.into_iter().map(Column::from));
+        columns.extend(agg_columns);

         DataFrame::new(columns)
     }
diff --git a/crates/polars-mem-engine/src/executors/projection_utils.rs b/crates/polars-mem-engine/src/executors/projection_utils.rs
index 01dc5f362fd9..17892fb17791 100644
--- a/crates/polars-mem-engine/src/executors/projection_utils.rs
+++ b/crates/polars-mem-engine/src/executors/projection_utils.rs
@@ -340,10 +340,7 @@ pub(super) fn check_expand_literals(
     }

     // @scalar-opt
-    let selected_columns = selected_columns
-        .into_iter()
-        .map(Column::from)
-        .collect::>();
+    let selected_columns = selected_columns.into_iter().collect::>();

     let df = unsafe { DataFrame::new_no_checks_height_from_first(selected_columns) };

diff --git a/crates/polars-mem-engine/src/executors/stack.rs b/crates/polars-mem-engine/src/executors/stack.rs
index e48d7438e23c..62e1ffb7fdfe 100644
--- a/crates/polars-mem-engine/src/executors/stack.rs
+++ b/crates/polars-mem-engine/src/executors/stack.rs
@@ -65,7 +65,7 @@ impl StackExec {
                 // new, unique column names. It is immediately
                 // followed by a projection which pulls out the
                 // possibly mismatching column lengths.
-                unsafe { df.column_extend_unchecked(res.into_iter().map(Column::from)) };
+                unsafe { df.column_extend_unchecked(res) };
             } else {
                 let (df_height, df_width) = df.shape();

diff --git a/crates/polars-ops/src/series/ops/duration.rs b/crates/polars-ops/src/series/ops/duration.rs
index 2c8f0ae022e5..85adfe6e7117 100644
--- a/crates/polars-ops/src/series/ops/duration.rs
+++ b/crates/polars-ops/src/series/ops/duration.rs
@@ -87,7 +87,5 @@ pub fn impl_duration(s: &[Column], time_unit: TimeUnit) -> PolarsResult {
         duration = (duration + weeks * multiplier * SECONDS_IN_DAY * 7)?;
     }

-    duration
-        .cast(&DataType::Duration(time_unit))
-        .map(Column::from)
+    duration.cast(&DataType::Duration(time_unit))
 }
diff --git a/crates/polars-parquet/src/parquet/encoding/hybrid_rle/encoder.rs b/crates/polars-parquet/src/parquet/encoding/hybrid_rle/encoder.rs
index 7e1858e44979..7a15d1fac520 100644
--- a/crates/polars-parquet/src/parquet/encoding/hybrid_rle/encoder.rs
+++ b/crates/polars-parquet/src/parquet/encoding/hybrid_rle/encoder.rs
@@ -244,7 +244,7 @@ mod tests {

         encode::(&mut vec, iter, 1)?;

-        assert_eq!(vec, vec![(2 << 1 | 1), 0b10011101u8, 0b00011101]);
+        assert_eq!(vec, vec![((2 << 1) | 1), 0b10011101u8, 0b00011101]);

         Ok(())
     }
@@ -259,7 +259,7 @@ mod tests {
             1,
         )?;

-        assert_eq!(vec, vec![(1 << 1 | 1), 0b11111111]);
+        assert_eq!(vec, vec![((1 << 1) | 1), 0b11111111]);

         Ok(())
     }
@@ -272,7 +272,7 @@ mod tests {
         assert_eq!(
             vec,
             vec![
-                (2 << 1 | 1),
+                ((2 << 1) | 1),
                 0b01_10_01_00,
                 0b00_01_01_10,
                 0b_00_00_00_11,
@@ -294,7 +294,7 @@ mod tests {

         let expected = 0b11_10_01_00u8;
         let mut expected = vec![expected; length / 4];
-        expected.insert(0, ((length / 8) as u8) << 1 | 1);
+        expected.insert(0, (((length / 8) as u8) << 1) | 1);

         assert_eq!(vec, expected);
         Ok(())
diff --git a/crates/polars-plan/src/dsl/function_expr/datetime.rs b/crates/polars-plan/src/dsl/function_expr/datetime.rs
index 30db9d0ffa04..4bdfd28ed7ee 100644
--- a/crates/polars-plan/src/dsl/function_expr/datetime.rs
+++ b/crates/polars-plan/src/dsl/function_expr/datetime.rs
@@ -269,7 +269,6 @@ pub(super) fn time(s: &Column) -> PolarsResult {
         DataType::Time => Ok(s.clone()),
         dtype => polars_bail!(ComputeError: "expected Datetime or Time, got {}", dtype),
     }
-    .map(Column::from)
 }
 pub(super) fn date(s: &Column) -> PolarsResult {
     match s.dtype() {
diff --git a/crates/polars-plan/src/dsl/function_expr/range/datetime_range.rs b/crates/polars-plan/src/dsl/function_expr/range/datetime_range.rs
index e220a7107435..2b0fdc1e9cc8 100644
--- a/crates/polars-plan/src/dsl/function_expr/range/datetime_range.rs
+++ b/crates/polars-plan/src/dsl/function_expr/range/datetime_range.rs
@@ -220,7 +220,7 @@ pub(super) fn datetime_ranges(
     };

     let to_type = DataType::List(Box::new(dtype));
-    out.cast(&to_type).map(Column::from)
+    out.cast(&to_type)
 }

 impl FieldsMapper<'_> {
diff --git a/crates/polars-plan/src/dsl/function_expr/range/time_range.rs b/crates/polars-plan/src/dsl/function_expr/range/time_range.rs
index e339105bee3f..dd55e4868ab2 100644
--- a/crates/polars-plan/src/dsl/function_expr/range/time_range.rs
+++ b/crates/polars-plan/src/dsl/function_expr/range/time_range.rs
@@ -62,5 +62,5 @@ pub(super) fn time_ranges(
     let out = temporal_ranges_impl_broadcast(start, end, range_impl, &mut builder)?;

     let to_type = DataType::List(Box::new(DataType::Time));
-    out.cast(&to_type).map(Column::from)
+    out.cast(&to_type)
 }
diff --git a/crates/polars-plan/src/dsl/mod.rs b/crates/polars-plan/src/dsl/mod.rs
index ef27dc3966b9..f831cac12c6f 100644
--- a/crates/polars-plan/src/dsl/mod.rs
+++ b/crates/polars-plan/src/dsl/mod.rs
@@ -1516,7 +1516,7 @@ impl Expr {
                 .map(|ca| ca.into_column()),
             }?;
             if let DataType::Float32 = c.dtype() {
-                out.cast(&DataType::Float32).map(Column::from).map(Some)
+                out.cast(&DataType::Float32).map(Some)
             } else {
                 Ok(Some(out))
             }
diff --git a/crates/polars-python/src/lib.rs b/crates/polars-python/src/lib.rs
index dee0efe4a710..640e9d5d7785 100644
--- a/crates/polars-python/src/lib.rs
+++ b/crates/polars-python/src/lib.rs
@@ -3,6 +3,7 @@
 #![allow(non_local_definitions)]
 #![allow(clippy::too_many_arguments)] // Python functions can have many arguments due to default arguments
 #![allow(clippy::disallowed_types)]
+#![allow(clippy::useless_conversion)] // Needed for now due to https://github.com/PyO3/pyo3/issues/4828.

 #[cfg(feature = "csv")]
 pub mod batched_csv;
diff --git a/crates/polars-python/src/map/dataframe.rs b/crates/polars-python/src/map/dataframe.rs
index 203b54bbd39b..068dbfd3c996 100644
--- a/crates/polars-python/src/map/dataframe.rs
+++ b/crates/polars-python/src/map/dataframe.rs
@@ -267,9 +267,9 @@ pub fn apply_lambda_with_list_out_type<'a>(
             if val.is_none() {
                 Ok(None)
             } else {
-                Err(PyValueError::new_err(
-                    "should return a Series, got a {val:?}",
-                ))
+                Err(PyValueError::new_err(format!(
+                    "should return a Series, got a {val:?}"
+                )))
             }
         },
     }
diff --git a/crates/polars-stream/src/physical_plan/lower_expr.rs b/crates/polars-stream/src/physical_plan/lower_expr.rs
index 1df9b0e35d51..3d9f87b14487 100644
--- a/crates/polars-stream/src/physical_plan/lower_expr.rs
+++ b/crates/polars-stream/src/physical_plan/lower_expr.rs
@@ -2,7 +2,7 @@
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
 use polars_core::frame::DataFrame;
-use polars_core::prelude::{Column, Field, InitHashMaps, PlHashMap, PlHashSet};
+use polars_core::prelude::{Field, InitHashMaps, PlHashMap, PlHashSet};
 use polars_core::schema::{Schema, SchemaExt};
 use polars_error::PolarsResult;
 use polars_expr::planner::get_expr_depth_limit;
@@ -283,7 +283,7 @@ fn build_fallback_node_with_ctx(
         let exec_state = ExecutionState::new();
         let columns = phys_exprs
             .iter()
-            .map(|phys_expr| phys_expr.evaluate(&df, &exec_state).map(Column::from))
+            .map(|phys_expr| phys_expr.evaluate(&df, &exec_state))
             .try_collect()?;
         DataFrame::new_with_broadcast(columns)
     };
diff --git a/crates/polars-utils/src/index.rs b/crates/polars-utils/src/index.rs
index 80ac6d8ebdb2..558402bb47ed 100644
--- a/crates/polars-utils/src/index.rs
+++ b/crates/polars-utils/src/index.rs
@@ -218,7 +218,7 @@ impl ChunkId {
     #[allow(clippy::unnecessary_cast)]
     pub fn store(chunk: IdxSize, row: IdxSize) -> Self {
         debug_assert!(chunk < !(u64::MAX << CHUNK_BITS) as IdxSize);
-        let swizzled = (row as u64) << CHUNK_BITS | chunk as u64;
+        let swizzled = ((row as u64) << CHUNK_BITS) | chunk as u64;
         Self { swizzled }
     }

diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 5511df850a96..25282aed0fb5 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly-2024-12-19"
+channel = "nightly-2025-01-05"