This is an automated email from the ASF dual-hosted git repository.

guanmingchiu pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/mahout.git


The following commit(s) were added to refs/heads/main by this push:
     new d5fc18bd0 feat: enhance test skipping logic (#925)
d5fc18bd0 is described below

commit d5fc18bd01bd4fbaa6c947252b142f38c9d8b720
Author: Ryan Huang <[email protected]>
AuthorDate: Sun Jan 25 00:21:04 2026 +0800

    feat: enhance test skipping logic (#925)
    
    * Now when _qdp is not built, the tests will show as skipped with a helpful message instead of being silently ignored.
    
    * fix pre-commit
    
    * bring back
    
    * add -rs
    
    * linter
    
    * feat: add requires_qdp marker for conditional QDP tests and refactor imports
---
 pyproject.toml                    |  2 +-
 testing/conftest.py               |  9 +++-----
 testing/qdp/qdp_test_utils.py     | 37 ++++++++++++++++++++++++++++++
 testing/qdp/test_bindings.py      | 48 ++++++++++++++++++++++++++++++++++++---
 testing/qdp/test_high_fidelity.py | 14 +++++++++++-
 testing/qdp/test_numpy.py         | 20 +++++++++++++++-
 6 files changed, 118 insertions(+), 12 deletions(-)
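
For context, the pattern this commit introduces looks roughly like the sketch
below. The test name and body are hypothetical and not part of this commit; the
requires_qdp marker and its skip reason come from the new
testing/qdp/qdp_test_utils.py, and the lazy import mirrors how the updated tests
defer loading _qdp until the test body runs:

    # Hypothetical test module under testing/qdp/ (illustration only).
    from .qdp_test_utils import requires_qdp


    @requires_qdp
    def test_qdp_module_exposes_engine():
        # Imported inside the test so collection succeeds even when the
        # extension is not built.
        import _qdp

        assert hasattr(_qdp, "QdpEngine")

With "-rs" now in addopts, pytest lists each skipped test with its skip reason in
the short test summary, so contributors without the built extension see the
maturin build hint instead of silently missing the QDP tests.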

diff --git a/pyproject.toml b/pyproject.toml
index 20ad75887..47337c697 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,7 +45,7 @@ qumat-qdp = { path = "qdp/qdp-python", editable = true }
 testpaths = ["testing"]
 python_files = "test_*.py"
 python_functions = "test_*"
-addopts = ["-v", "--tb=short"]
+addopts = ["-v", "--tb=short", "-rs"]
 markers = [
     "gpu: marks tests as requiring GPU and _qdp extension (auto-skipped if 
unavailable)",
     "slow: marks tests as slow running",
diff --git a/testing/conftest.py b/testing/conftest.py
index fbff7eba5..2099a28a1 100644
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -26,11 +26,13 @@ QDP tests are automatically skipped if the _qdp extension is not available,
 allowing contributors without CUDA to run the qumat test suite.
 """
 
+from typing import Optional
+
 import pytest
 
 # Check if QDP extension is available at module load time
 _QDP_AVAILABLE = False
-_QDP_IMPORT_ERROR: str | None = "No module named '_qdp'"
+_QDP_IMPORT_ERROR: Optional[str] = "No module named '_qdp'"
 try:
     import _qdp  # noqa: F401, PLC0415
 
@@ -39,11 +41,6 @@ try:
 except ImportError as e:
     _QDP_IMPORT_ERROR = str(e)
 
-# Skip qdp tests at collection time if dependencies are not available
-collect_ignore_glob = []
-if not _QDP_AVAILABLE:
-    collect_ignore_glob.append("qdp/*.py")
-
 
 def pytest_configure(config):  # noqa: ARG001
     """Register custom pytest markers."""
diff --git a/testing/qdp/qdp_test_utils.py b/testing/qdp/qdp_test_utils.py
new file mode 100644
index 000000000..855eec441
--- /dev/null
+++ b/testing/qdp/qdp_test_utils.py
@@ -0,0 +1,37 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared pytest fixtures and markers for QDP tests."""
+
+import pytest
+
+QDP_SKIP_REASON = (
+    "QDP extension not built. Run: uv run --active maturin develop "
+    "--manifest-path qdp/qdp-python/Cargo.toml"
+)
+
+
+def _qdp_available():
+    """Check if QDP extension is available."""
+    try:
+        import _qdp
+
+        return _qdp is not None
+    except ImportError:
+        return False
+
+
+requires_qdp = pytest.mark.skipif(not _qdp_available(), reason=QDP_SKIP_REASON)
diff --git a/testing/qdp/test_bindings.py b/testing/qdp/test_bindings.py
index 56b2da7b2..58f5d5af6 100644
--- a/testing/qdp/test_bindings.py
+++ b/testing/qdp/test_bindings.py
@@ -18,7 +18,8 @@
 
 import pytest
 import torch
-import _qdp
+
+from .qdp_test_utils import requires_qdp
 
 
 def _has_multi_gpu():
@@ -31,8 +32,11 @@ def _has_multi_gpu():
         return False
 
 
+@requires_qdp
 def test_import():
     """Test that PyO3 bindings are properly imported."""
+    import _qdp
+
     assert hasattr(_qdp, "QdpEngine")
     assert hasattr(_qdp, "QuantumTensor")
 
@@ -43,17 +47,19 @@ def test_import():
     assert callable(getattr(QdpEngine, "encode_from_tensorflow"))
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode():
     """Test encoding returns QuantumTensor (requires GPU)."""
-    from _qdp import QdpEngine
+    from _qdp import QdpEngine, QuantumTensor
 
     engine = QdpEngine(0)
     data = [0.5, 0.5, 0.5, 0.5]
     qtensor = engine.encode(data, 2, "amplitude")
-    assert isinstance(qtensor, _qdp.QuantumTensor)
+    assert isinstance(qtensor, QuantumTensor)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_dlpack_device():
     """Test __dlpack_device__ method (requires GPU)."""
@@ -67,6 +73,7 @@ def test_dlpack_device():
     assert device_info == (2, 0), "Expected (2, 0) for CUDA device 0"
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.skipif(
     not _has_multi_gpu(), reason="Multi-GPU setup required for this test"
@@ -97,6 +104,7 @@ def test_dlpack_device_id_non_zero():
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_dlpack_single_use():
     """Test that __dlpack__ can only be called once (requires GPU)."""
@@ -117,6 +125,7 @@ def test_dlpack_single_use():
         qtensor2.__dlpack__()
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_pytorch_integration():
     """Test PyTorch integration via DLPack (requires GPU and PyTorch)."""
@@ -138,6 +147,7 @@ def test_pytorch_integration():
     assert torch_tensor.shape == (1, 4)
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "precision,expected_dtype",
@@ -163,6 +173,7 @@ def test_precision(precision, expected_dtype):
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "data_shape,expected_shape",
@@ -196,6 +207,7 @@ def test_encode_tensor_cpu(data_shape, expected_shape):
     assert torch_tensor.shape == expected_shape
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode_from_tensorflow_binding():
     """Test TensorFlow TensorProto binding path (requires GPU and 
TensorFlow)."""
@@ -231,6 +243,7 @@ def test_encode_from_tensorflow_binding():
             os.remove(pb_path)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode_errors():
     """Test error handling for unified encode method."""
@@ -252,6 +265,7 @@ def test_encode_errors():
         engine.encode({"key": "value"}, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "data_shape,expected_shape,expected_batch_size",
@@ -290,6 +304,7 @@ def test_encode_cuda_tensor(data_shape, expected_shape, expected_batch_size):
         assert torch.isclose(norm, torch.tensor(1.0, device="cuda:0"), atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode_cuda_tensor_wrong_dtype():
     """Test error when CUDA tensor has wrong dtype (non-float64)."""
@@ -308,6 +323,7 @@ def test_encode_cuda_tensor_wrong_dtype():
         engine.encode(data, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode_cuda_tensor_non_contiguous():
     """Test error when CUDA tensor is non-contiguous."""
@@ -330,6 +346,7 @@ def test_encode_cuda_tensor_non_contiguous():
         engine.encode(data, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.skipif(
     not _has_multi_gpu(), reason="Multi-GPU setup required for this test"
@@ -349,6 +366,7 @@ def test_encode_cuda_tensor_device_mismatch():
         engine.encode(data, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode_cuda_tensor_empty():
     """Test error when CUDA tensor is empty."""
@@ -367,6 +385,7 @@ def test_encode_cuda_tensor_empty():
         engine.encode(data, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "data_shape,is_batch",
@@ -398,6 +417,7 @@ def test_encode_cuda_tensor_preserves_input(data_shape, is_batch):
     assert torch.equal(data, data_clone)
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize("encoding_method", ["basis", "angle"])
 def test_encode_cuda_tensor_unsupported_encoding(encoding_method):
@@ -419,6 +439,7 @@ def test_encode_cuda_tensor_unsupported_encoding(encoding_method):
         engine.encode(data, 2, encoding_method)
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "input_type,error_match",
@@ -454,6 +475,7 @@ def test_encode_3d_rejected(input_type, error_match):
         engine.encode(data, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "tensor_factory,description",
@@ -489,6 +511,7 @@ def test_encode_cuda_tensor_non_finite_values(tensor_factory, description):
         engine.encode(data, 2, "amplitude")
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "precision,expected_dtype",
@@ -514,6 +537,7 @@ def test_encode_cuda_tensor_output_dtype(precision, expected_dtype):
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_basis_encode_basic():
     """Test basic basis encoding (requires GPU)."""
@@ -538,6 +562,7 @@ def test_basis_encode_basic():
     assert torch.allclose(torch_tensor, expected.to(torch_tensor.dtype))
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_basis_encode_nonzero_index():
     """Test basis encoding with non-zero index (requires GPU)."""
@@ -559,6 +584,7 @@ def test_basis_encode_nonzero_index():
     assert torch.allclose(torch_tensor, expected.to(torch_tensor.dtype))
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_basis_encode_3_qubits():
     """Test basis encoding with 3 qubits (requires GPU)."""
@@ -588,6 +614,7 @@ def test_basis_encode_3_qubits():
             assert host_tensor[i].imag == 0.0
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_basis_encode_errors():
     """Test error handling for basis encoding (requires GPU)."""
@@ -621,6 +648,7 @@ def test_basis_encode_errors():
         engine.encode([0.0, 1.0], 2, "basis")
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_angle_encode_basic():
     """Test basic angle encoding (requires GPU)."""
@@ -644,6 +672,7 @@ def test_angle_encode_basic():
     assert torch.allclose(torch_tensor, expected.to(torch_tensor.dtype))
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_angle_encode_nonzero_angles():
     """Test angle encoding with non-zero angles (requires GPU)."""
@@ -666,6 +695,7 @@ def test_angle_encode_nonzero_angles():
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_angle_encode_batch():
     """Test batch angle encoding (requires GPU)."""
@@ -696,6 +726,7 @@ def test_angle_encode_batch():
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_angle_encode_errors():
     """Test error handling for angle encoding (requires GPU)."""
@@ -717,6 +748,7 @@ def test_angle_encode_errors():
         engine.encode([float("nan"), 0.0], 2, "angle")
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "data_shape,expected_shape",
@@ -748,6 +780,7 @@ def test_encode_numpy_array(data_shape, expected_shape):
     assert torch_tensor.shape == expected_shape
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_encode_pathlib_path():
     """Test encoding from pathlib.Path object."""
@@ -783,6 +816,7 @@ def test_encode_pathlib_path():
             os.remove(npy_path)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_z_encode_basic():
     """Test basic IQP-Z encoding with zero angles (requires GPU).
@@ -813,6 +847,7 @@ def test_iqp_z_encode_basic():
     assert torch.allclose(torch_tensor, expected, atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_z_encode_nonzero():
     """Test IQP-Z encoding with non-zero angles (requires GPU)."""
@@ -838,6 +873,7 @@ def test_iqp_z_encode_nonzero():
     assert torch.allclose(norm, torch.tensor(1.0, device="cuda:0"), atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_encode_basic():
     """Test basic IQP encoding with ZZ interactions (requires GPU)."""
@@ -863,6 +899,7 @@ def test_iqp_encode_basic():
     assert torch.allclose(torch_tensor, expected, atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_encode_zz_effect():
     """Test that ZZ interaction produces different result than Z-only 
(requires GPU)."""
@@ -895,6 +932,7 @@ def test_iqp_encode_zz_effect():
     assert torch.allclose(norm_zz, torch.tensor(1.0, device="cuda:0"), atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_encode_3_qubits():
     """Test IQP encoding with 3 qubits (requires GPU)."""
@@ -920,6 +958,7 @@ def test_iqp_encode_3_qubits():
     assert torch.allclose(torch_tensor, expected, atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_z_encode_batch():
     """Test batch IQP-Z encoding (requires GPU)."""
@@ -948,6 +987,7 @@ def test_iqp_z_encode_batch():
     assert torch.allclose(norm_1, torch.tensor(1.0, device="cuda:0"), atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_encode_batch():
     """Test batch IQP encoding with ZZ interactions (requires GPU)."""
@@ -979,6 +1019,7 @@ def test_iqp_encode_batch():
     assert torch.allclose(norm_1, torch.tensor(1.0, device="cuda:0"), atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_encode_single_qubit():
     """Test IQP encoding with single qubit edge case (requires GPU)."""
@@ -1007,6 +1048,7 @@ def test_iqp_encode_single_qubit():
     assert torch.allclose(torch_tensor2, expected, atol=1e-6)
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_iqp_encode_errors():
     """Test error handling for IQP encoding (requires GPU)."""
diff --git a/testing/qdp/test_high_fidelity.py b/testing/qdp/test_high_fidelity.py
index 20725d606..a4641e809 100644
--- a/testing/qdp/test_high_fidelity.py
+++ b/testing/qdp/test_high_fidelity.py
@@ -23,7 +23,8 @@ import pytest
 import torch
 import numpy as np
 import concurrent.futures
-from _qdp import QdpEngine
+
+from .qdp_test_utils import requires_qdp
 
 np.random.seed(2026)
 
@@ -55,6 +56,8 @@ def calculate_fidelity(
 @pytest.fixture(scope="module")
 def engine():
     """Initialize QDP engine (module-scoped singleton)."""
+    from _qdp import QdpEngine
+
     try:
         return QdpEngine(0)
     except RuntimeError as e:
@@ -64,6 +67,8 @@ def engine():
 @pytest.fixture(scope="module")
 def engine_float64():
     """High-precision engine for fidelity-sensitive tests."""
+    from _qdp import QdpEngine
+
     try:
         return QdpEngine(0, precision="float64")
     except RuntimeError as e:
@@ -73,6 +78,7 @@ def engine_float64():
 # 1. Core Logic and Boundary Tests
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "num_qubits, data_size, desc",
@@ -114,6 +120,7 @@ def test_amplitude_encoding_fidelity_comprehensive(
     assert fidelity > (1.0 - 1e-14), f"Fidelity loss in {desc}! F={fidelity}"
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_complex_integrity(engine):
     """Verify imaginary part is effectively zero for amplitude encoding."""
@@ -137,6 +144,7 @@ def test_complex_integrity(engine):
 # 2. Numerical Stability Tests
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_numerical_stability_underflow(engine_float64):
     """Test precision with extremely small values (1e-150)."""
@@ -156,6 +164,7 @@ def test_numerical_stability_underflow(engine_float64):
 # 3. Memory Leak Tests
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_memory_leak_quantitative(engine):
     """Quantitative memory leak test using torch.cuda.memory_allocated()."""
@@ -184,6 +193,7 @@ def test_memory_leak_quantitative(engine):
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_memory_safety_stress(engine):
     """Stress test: rapid encode/release to verify DLPack deleter."""
@@ -209,6 +219,7 @@ def test_memory_safety_stress(engine):
 # 4. Thread Safety Tests
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_multithreaded_access(engine):
     """Test concurrent access from multiple threads (validates Send+Sync)."""
@@ -241,6 +252,7 @@ def test_multithreaded_access(engine):
 # 5. Error Propagation Tests
 
 
+@requires_qdp
 @pytest.mark.gpu
 def test_error_propagation(engine):
     """Verify Rust errors are correctly propagated to Python RuntimeError."""
diff --git a/testing/qdp/test_numpy.py b/testing/qdp/test_numpy.py
index c5f53ecbd..b354bb5f3 100644
--- a/testing/qdp/test_numpy.py
+++ b/testing/qdp/test_numpy.py
@@ -23,7 +23,7 @@ import numpy as np
 import pytest
 import torch
 
-from _qdp import QdpEngine
+from .qdp_test_utils import requires_qdp
 
 
 def _verify_tensor(tensor, expected_shape, check_normalization=False):
@@ -40,6 +40,7 @@ def _verify_tensor(tensor, expected_shape, check_normalization=False):
         )
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "num_samples,num_qubits,check_norm",
@@ -51,6 +52,8 @@ def _verify_tensor(tensor, expected_shape, check_normalization=False):
 )
 def test_encode_from_numpy_file(num_samples, num_qubits, check_norm):
     """Test NumPy file encoding"""
+    from _qdp import QdpEngine
+
     pytest.importorskip("torch")
     if not torch.cuda.is_available():
         pytest.skip("GPU required for QdpEngine")
@@ -79,10 +82,13 @@ def test_encode_from_numpy_file(num_samples, num_qubits, check_norm):
             os.remove(npy_path)
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize("num_qubits", [1, 2, 3, 4])
 def test_encode_numpy_array_1d(num_qubits):
     """Test 1D NumPy array encoding (single sample)"""
+    from _qdp import QdpEngine
+
     pytest.importorskip("torch")
     if not torch.cuda.is_available():
         pytest.skip("GPU required for QdpEngine")
@@ -97,10 +103,13 @@ def test_encode_numpy_array_1d(num_qubits):
     _verify_tensor(tensor, (1, sample_size), check_normalization=True)
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize("num_samples,num_qubits", [(5, 2), (10, 3)])
 def test_encode_numpy_array_2d(num_samples, num_qubits):
     """Test 2D NumPy array encoding (batch)"""
+    from _qdp import QdpEngine
+
     pytest.importorskip("torch")
     if not torch.cuda.is_available():
         pytest.skip("GPU required for QdpEngine")
@@ -116,10 +125,13 @@ def test_encode_numpy_array_2d(num_samples, num_qubits):
     _verify_tensor(tensor, (num_samples, sample_size), check_normalization=True)
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize("encoding_method", ["amplitude"])
 def test_encode_numpy_encoding_methods(encoding_method):
     """Test different encoding methods"""
+    from _qdp import QdpEngine
+
     pytest.importorskip("torch")
     if not torch.cuda.is_available():
         pytest.skip("GPU required for QdpEngine")
@@ -135,6 +147,7 @@ def test_encode_numpy_encoding_methods(encoding_method):
     _verify_tensor(tensor, (1, sample_size))
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "precision,expected_dtype",
@@ -145,6 +158,8 @@ def test_encode_numpy_encoding_methods(encoding_method):
 )
 def test_encode_numpy_precision(precision, expected_dtype):
     """Test different precision settings"""
+    from _qdp import QdpEngine
+
     pytest.importorskip("torch")
     if not torch.cuda.is_available():
         pytest.skip("GPU required for QdpEngine")
@@ -160,6 +175,7 @@ def test_encode_numpy_precision(precision, expected_dtype):
     )
 
 
+@requires_qdp
 @pytest.mark.gpu
 @pytest.mark.parametrize(
     "data,error_match",
@@ -176,6 +192,8 @@ def test_encode_numpy_precision(precision, expected_dtype):
 )
 def test_encode_numpy_errors(data, error_match):
     """Test error handling for invalid inputs"""
+    from _qdp import QdpEngine
+
     pytest.importorskip("torch")
     if not torch.cuda.is_available():
         pytest.skip("GPU required for QdpEngine")
