This is an automated email from the ASF dual-hosted git repository.
tustvold pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow-rs.git
The following commit(s) were added to refs/heads/master by this push:
new 2c4bc5449f Upgrade to Rust 1.73.0 (#4899)
2c4bc5449f is described below
commit 2c4bc5449fc4432ecb2f9963994ac8997b64b52e
Author: Carol (Nichols || Goulding)
<[email protected]>
AuthorDate: Fri Oct 6 09:55:38 2023 -0400
Upgrade to Rust 1.73.0 (#4899)
* fix: Call Ord's implementation from PartialOrd so they stay in sync
As recommended by Clippy in Rust 1.73.0
<https://rust-lang.github.io/rust-clippy/master/index.html#/incorrect_partial_ord_impl_on_ord_type>
* fix: Use or_default methods instead of or_else(default)
As recommended by Clippy in Rust 1.73.0
<https://rust-lang.github.io/rust-clippy/master/index.html#/unwrap_or_default>
* fix: Use filter then map with bools instead of filter_map then
As recommended by Clippy in Rust 1.73.0
<https://rust-lang.github.io/rust-clippy/master/index.html#/filter_map_bool_then>
* fix: Change a match guard to a pattern
As recommended by Clippy in Rust 1.73.0
<https://rust-lang.github.io/rust-clippy/master/index.html#/redundant_guards>
* fix: Change to a different kind of filter_map
Co-authored-by: Raphael Taylor-Davies
<[email protected]>
---------
Co-authored-by: Raphael Taylor-Davies
<[email protected]>
---
arrow-row/src/lib.rs | 4 ++--
object_store/src/azure/client.rs | 2 +-
object_store/src/azure/credential.rs | 8 +++----
parquet/src/arrow/async_reader/mod.rs | 45 +++++++++++++++++------------------
parquet/src/arrow/buffer/bit_util.rs | 2 +-
parquet/src/arrow/schema/primitive.rs | 4 ++--
parquet/src/file/properties.rs | 4 +---
7 files changed, 32 insertions(+), 37 deletions(-)
diff --git a/arrow-row/src/lib.rs b/arrow-row/src/lib.rs
index 58dc42a4ca..1fb4e1de7a 100644
--- a/arrow-row/src/lib.rs
+++ b/arrow-row/src/lib.rs
@@ -989,7 +989,7 @@ impl<'a> Eq for Row<'a> {}
impl<'a> PartialOrd for Row<'a> {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.data.partial_cmp(other.data)
+ Some(self.cmp(other))
}
}
@@ -1049,7 +1049,7 @@ impl Eq for OwnedRow {}
impl PartialOrd for OwnedRow {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.row().partial_cmp(&other.row())
+ Some(self.cmp(other))
}
}
diff --git a/object_store/src/azure/client.rs b/object_store/src/azure/client.rs
index e18135c2c7..cd1a3a10fc 100644
--- a/object_store/src/azure/client.rs
+++ b/object_store/src/azure/client.rs
@@ -372,7 +372,7 @@ struct ListResultInternal {
}
fn to_list_result(value: ListResultInternal, prefix: Option<&str>) -> Result<ListResult> {
- let prefix = prefix.map(Path::from).unwrap_or_else(Path::default);
+ let prefix = prefix.map(Path::from).unwrap_or_default();
let common_prefixes = value
.blobs
.blob_prefix
diff --git a/object_store/src/azure/credential.rs b/object_store/src/azure/credential.rs
index fd75389249..8dc61365fa 100644
--- a/object_store/src/azure/credential.rs
+++ b/object_store/src/azure/credential.rs
@@ -234,11 +234,9 @@ fn string_to_sign(h: &HeaderMap, u: &Url, method: &Method, account: &str) -> String {
fn canonicalize_header(headers: &HeaderMap) -> String {
let mut names = headers
.iter()
-        .filter_map(|(k, _)| {
-            (k.as_str().starts_with("x-ms"))
-                // TODO remove unwraps
-                .then(|| (k.as_str(), headers.get(k).unwrap().to_str().unwrap()))
-        })
+        .filter(|&(k, _)| (k.as_str().starts_with("x-ms")))
+        // TODO remove unwraps
+        .map(|(k, _)| (k.as_str(), headers.get(k).unwrap().to_str().unwrap()))
.collect::<Vec<_>>();
names.sort_unstable();
diff --git a/parquet/src/arrow/async_reader/mod.rs b/parquet/src/arrow/async_reader/mod.rs
index 7d30580ece..c749d4deeb 100644
--- a/parquet/src/arrow/async_reader/mod.rs
+++ b/parquet/src/arrow/async_reader/mod.rs
@@ -625,27 +625,27 @@ impl<'a> InMemoryRowGroup<'a> {
.iter()
.zip(self.metadata.columns())
.enumerate()
-            .filter_map(|(idx, (chunk, chunk_meta))| {
-                (chunk.is_none() && projection.leaf_included(idx)).then(|| {
-                    // If the first page does not start at the beginning of the column,
-                    // then we need to also fetch a dictionary page.
-                    let mut ranges = vec![];
-                    let (start, _len) = chunk_meta.byte_range();
-                    match page_locations[idx].first() {
-                        Some(first) if first.offset as u64 != start => {
-                            ranges.push(start as usize..first.offset as usize);
-                        }
-                        _ => (),
+            .filter(|&(idx, (chunk, _chunk_meta))| {
+                chunk.is_none() && projection.leaf_included(idx)
+            })
+            .flat_map(|(idx, (_chunk, chunk_meta))| {
+                // If the first page does not start at the beginning of the column,
+                // then we need to also fetch a dictionary page.
+                let mut ranges = vec![];
+                let (start, _len) = chunk_meta.byte_range();
+                match page_locations[idx].first() {
+                    Some(first) if first.offset as u64 != start => {
+                        ranges.push(start as usize..first.offset as usize);
                     }
+                    _ => (),
+                }

-                    ranges.extend(selection.scan_ranges(&page_locations[idx]));
-                    page_start_offsets
-                        .push(ranges.iter().map(|range| range.start).collect());
+                ranges.extend(selection.scan_ranges(&page_locations[idx]));
+                page_start_offsets
+                    .push(ranges.iter().map(|range| range.start).collect());

-                    ranges
-                })
+                ranges
             })
-            .flatten()
             .collect();
        let mut chunk_data = input.get_byte_ranges(fetch_ranges).await?.into_iter();
@@ -673,12 +673,11 @@ impl<'a> InMemoryRowGroup<'a> {
.column_chunks
.iter()
.enumerate()
-            .filter_map(|(idx, chunk)| {
-                (chunk.is_none() && projection.leaf_included(idx)).then(|| {
-                    let column = self.metadata.column(idx);
-                    let (start, length) = column.byte_range();
-                    start as usize..(start + length) as usize
-                })
+            .filter(|&(idx, chunk)| chunk.is_none() && projection.leaf_included(idx))
+            .map(|(idx, _chunk)| {
+                let column = self.metadata.column(idx);
+                let (start, length) = column.byte_range();
+                start as usize..(start + length) as usize
             })
.collect();
diff --git a/parquet/src/arrow/buffer/bit_util.rs b/parquet/src/arrow/buffer/bit_util.rs
index d01556d24e..b8e2e2f539 100644
--- a/parquet/src/arrow/buffer/bit_util.rs
+++ b/parquet/src/arrow/buffer/bit_util.rs
@@ -84,7 +84,7 @@ mod tests {
.iter()
.enumerate()
.rev()
- .filter_map(|(x, y)| y.then(|| x))
+ .filter_map(|(x, y)| y.then_some(x))
.collect();
assert_eq!(actual, expected);
diff --git a/parquet/src/arrow/schema/primitive.rs b/parquet/src/arrow/schema/primitive.rs
index 83d84b77ec..7d8b6a04ee 100644
--- a/parquet/src/arrow/schema/primitive.rs
+++ b/parquet/src/arrow/schema/primitive.rs
@@ -193,11 +193,11 @@ fn from_int64(info: &BasicTypeInfo, scale: i32, precision: i32) -> Result<DataType> {
(None, ConvertedType::NONE) => Ok(DataType::Int64),
(
Some(LogicalType::Integer {
- bit_width,
+ bit_width: 64,
is_signed,
}),
_,
- ) if bit_width == 64 => match is_signed {
+ ) => match is_signed {
true => Ok(DataType::Int64),
false => Ok(DataType::UInt64),
},
diff --git a/parquet/src/file/properties.rs b/parquet/src/file/properties.rs
index 3d6390c036..c83fea3f9b 100644
--- a/parquet/src/file/properties.rs
+++ b/parquet/src/file/properties.rs
@@ -550,9 +550,7 @@ impl WriterPropertiesBuilder {
/// Helper method to get existing or new mutable reference of column
properties.
#[inline]
fn get_mut_props(&mut self, col: ColumnPath) -> &mut ColumnProperties {
- self.column_properties
- .entry(col)
- .or_insert_with(Default::default)
+ self.column_properties.entry(col).or_default()
}
/// Sets encoding for a column.