This is an automated email from the ASF dual-hosted git repository.
raulcd pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow.git
The following commit(s) were added to refs/heads/main by this push:
new 984519dd77 GH-45129: [Python][C++] Fix usage of deprecated C++ functionality on pyarrow (#45189)
984519dd77 is described below
commit 984519dd77629646f42a3bb6946b201ba4108c8e
Author: Raúl Cumplido <[email protected]>
AuthorDate: Tue Jan 21 18:26:19 2025 +0100
GH-45129: [Python][C++] Fix usage of deprecated C++ functionality on pyarrow (#45189)
### Rationale for this change
We are using two deprecated C++ APIs:
- we are using `decimal` instead of `smallest_decimal`
- we are using the `arrow::Status` overload of `GetRecordBatchReader` (with an output parameter) instead of the `arrow::Result` one (sketched below)
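For reference, a minimal C++ sketch (not part of this change) of how the call site differs between the deprecated `arrow::Status` overload and the `arrow::Result` overload of `parquet::arrow::FileReader::GetRecordBatchReader`; the helper function and variable names below are illustrative only:

```cpp
#include <memory>
#include <vector>

#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "parquet/arrow/reader.h"

// Illustrative helper, not part of this commit.
arrow::Status ReadSelectedRowGroups(parquet::arrow::FileReader* reader,
                                    const std::vector<int>& row_groups,
                                    const std::vector<int>& columns) {
  // Deprecated pattern: Status return value plus an output parameter.
  //   std::unique_ptr<arrow::RecordBatchReader> batch_reader;
  //   ARROW_RETURN_NOT_OK(
  //       reader->GetRecordBatchReader(row_groups, columns, &batch_reader));

  // Non-deprecated pattern: the reader comes back wrapped in arrow::Result.
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::RecordBatchReader> batch_reader,
                        reader->GetRecordBatchReader(row_groups, columns));
  // ... consume batch_reader ...
  return arrow::Status::OK();
}
```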
### What changes are included in this PR?
Update the code to use the non-deprecated functions.
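For the decimal part, a sketch (not taken from this diff): `arrow::decimal(precision, scale)` is deprecated in favor of `arrow::smallest_decimal(precision, scale)`, which selects the narrowest decimal type that can hold the requested precision:

```cpp
#include <memory>

#include "arrow/type.h"

// Illustrative only; the values mirror the ones used in python_test.cc.
std::shared_ptr<arrow::DataType> MakeOverflowDecimalType() {
  // Deprecated:
  //   auto type = arrow::decimal(38, 38);

  // Non-deprecated: picks the smallest decimal width for the precision
  // (decimal128 for precision 38, decimal256 for precision 76).
  auto type = arrow::smallest_decimal(38, 38);
  return type;
}
```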
### Are these changes tested?
Yes, via CI with the existing tests.
### Are there any user-facing changes?
No
* GitHub Issue: #45129
Authored-by: Raúl Cumplido <[email protected]>
Signed-off-by: Raúl Cumplido <[email protected]>
---
python/pyarrow/_parquet.pxd | 8 +++-----
python/pyarrow/_parquet.pyx | 8 ++++----
python/pyarrow/src/arrow/python/python_test.cc | 4 ++--
3 files changed, 9 insertions(+), 11 deletions(-)
diff --git a/python/pyarrow/_parquet.pxd b/python/pyarrow/_parquet.pxd
index 71e93ce0a4..c17c3b70d7 100644
--- a/python/pyarrow/_parquet.pxd
+++ b/python/pyarrow/_parquet.pxd
@@ -484,11 +484,9 @@ cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil:
const vector[int]& column_indices,
shared_ptr[CTable]* out)
- CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
- const vector[int]& column_indices,
- unique_ptr[CRecordBatchReader]* out)
- CStatus GetRecordBatchReader(const vector[int]& row_group_indices,
- unique_ptr[CRecordBatchReader]* out)
+ CResult[unique_ptr[CRecordBatchReader]] GetRecordBatchReader(const vector[int]& row_group_indices,
+ const vector[int]& column_indices)
+ CResult[unique_ptr[CRecordBatchReader]] GetRecordBatchReader(const vector[int]& row_group_indices)
CStatus ReadTable(shared_ptr[CTable]* out)
CStatus ReadTable(const vector[int]& column_indices,
diff --git a/python/pyarrow/_parquet.pyx b/python/pyarrow/_parquet.pyx
index a3abf1865b..2fb1e41641 100644
--- a/python/pyarrow/_parquet.pyx
+++ b/python/pyarrow/_parquet.pyx
@@ -1616,16 +1616,16 @@ cdef class ParquetReader(_Weakrefable):
for index in column_indices:
c_column_indices.push_back(index)
with nogil:
- check_status(
+ recordbatchreader = GetResultValue(
self.reader.get().GetRecordBatchReader(
- c_row_groups, c_column_indices, &recordbatchreader
+ c_row_groups, c_column_indices
)
)
else:
with nogil:
- check_status(
+ recordbatchreader = GetResultValue(
self.reader.get().GetRecordBatchReader(
- c_row_groups, &recordbatchreader
+ c_row_groups
)
)
diff --git a/python/pyarrow/src/arrow/python/python_test.cc b/python/pyarrow/src/arrow/python/python_test.cc
index eea6bf9459..f988f8da31 100644
--- a/python/pyarrow/src/arrow/python/python_test.cc
+++ b/python/pyarrow/src/arrow/python/python_test.cc
@@ -663,7 +663,7 @@ Status TestDecimal128OverflowFails() {
ASSERT_EQ(38, metadata.precision());
ASSERT_EQ(1, metadata.scale());
- auto type = ::arrow::decimal(38, 38);
+ auto type = ::arrow::smallest_decimal(38, 38);
const auto& decimal_type = checked_cast<const DecimalType&>(*type);
ASSERT_RAISES(Invalid,
internal::DecimalFromPythonDecimal(python_decimal, decimal_type, &value));
@@ -689,7 +689,7 @@ Status TestDecimal256OverflowFails() {
ASSERT_EQ(76, metadata.precision());
ASSERT_EQ(1, metadata.scale());
- auto type = ::arrow::decimal(76, 76);
+ auto type = ::arrow::smallest_decimal(76, 76);
const auto& decimal_type = checked_cast<const DecimalType&>(*type);
ASSERT_RAISES(Invalid,
internal::DecimalFromPythonDecimal(python_decimal, decimal_type, &value));