Package: src:dep-logic
Version: 0.5.2-1
Severity: serious
Tags: sid forky ftbfs
dep-logic fails to build from source (FTBFS) in unstable because its test suite fails:
[...]
I: pybuild base:385: cd
/home/packages/tmp/o/dep-logic-0.5.2/.pybuild/cpython3_3.14_dep-logic/build;
python3.14 -m pytest tests
============================= test session starts
==============================
platform linux -- Python 3.14.3, pytest-9.0.2, pluggy-1.6.0
rootdir:
/home/packages/tmp/o/dep-logic-0.5.2/.pybuild/cpython3_3.14_dep-logic/build
configfile: pyproject.toml
plugins: typeguard-4.4.4, timeout-2.4.0, flaky-3.8.1, asyncio-1.3.0,
mock-3.15.1
asyncio: mode=Mode.STRICT, debug=False,
asyncio_default_fixture_loop_scope=None,
asyncio_default_test_loop_scope=function
collected 2479 items
tests/marker/test_common.py ............................
[ 1%]
tests/marker/test_compound.py ..........................................
[ 2%]
........................................................................
[ 5%]
.................................................
[ 7%]
tests/marker/test_evaluation.py .........................F..............
[ 9%]
.........................................
[ 10%]
tests/marker/test_expression.py ........................................
[ 12%]
...
[ 12%]
tests/marker/test_parsing.py ...........................................
[ 14%]
........................................................................
[ 17%]
........................................................................
[ 20%]
........................................................................
[ 23%]
........................................................................
[ 26%]
........................................................................
[ 28%]
........................................................................
[ 31%]
........................................................................
[ 34%]
........................................................................
[ 37%]
........................................................................
[ 40%]
........................................................................
[ 43%]
........................................................................
[ 46%]
........................................................................
[ 49%]
........................................................................
[ 52%]
........................................................................
[ 55%]
........................................................................
[ 58%]
........................................................................
[ 60%]
........................................................................
[ 63%]
........................................................................
[ 66%]
........................................................................
[ 69%]
........................................................................
[ 72%]
........................................................................
[ 75%]
........................................................................
[ 78%]
........................................................................
[ 81%]
........................................................................
[ 84%]
........................................................................
[ 87%]
........................................................................
[ 89%]
........................................................................
[ 92%]
...........................................................
[ 95%]
tests/specifier/test_arbitrary.py ..............F..
[ 95%]
tests/specifier/test_range.py ..........................................
[ 97%]
...................
[ 98%]
tests/specifier/test_union.py ...............
[ 98%]
tests/tags/test_platform.py ..............
[ 99%]
tests/tags/test_tags.py ...........
[100%]
=================================== FAILURES
===================================
_______ test_evaluate_extra[platform_release >= '6'-environment10-True]
________
marker_string = "platform_release >= '6'"
environment = {'platform_release': '6.1-foobar'}, expected = True
@pytest.mark.parametrize(
("marker_string", "environment", "expected"),
[
(f"os.name == '{os.name}'", None, True),
("sys.platform == 'win32'", {"sys_platform": "linux2"}, False),
("platform.version in 'Ubuntu'", {"platform_version":
"#39"}, False),
("platform.machine=='x86_64'", {"platform_machine":
"x86_64"}, True),
(
"platform.python_implementation=='Jython'",
{"platform_python_implementation": "CPython"},
False,
),
(
"python_version == '2.5' and
platform.python_implementation!= 'Jython'",
{"python_version": "2.7"},
False,
),
(
(
"platform_machine in 'x86_64 X86_64 aarch64 AARCH64
ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "foo"},
False,
),
(
(
"platform_machine in 'x86_64 X86_64 aarch64 AARCH64
ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "x86_64"},
True,
),
(
(
"platform_machine not in 'x86_64 X86_64 aarch64
AARCH64 ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "foo"},
True,
),
(
(
"platform_machine not in 'x86_64 X86_64 aarch64
AARCH64 ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "x86_64"},
False,
),
("platform_release >= '6'", {"platform_release":
"6.1-foobar"}, True),
# extras
# single extra
("extra != 'security'", {"extra": "quux"}, True),
("extra != 'security'", {"extra": "security"}, False),
("extra != 'security'", {}, True),
("extra != 'security'", {"platform_machine": "x86_64"}, True),
# normalization
("extra == 'Security.1'", {"extra": "security-1"}, True),
("extra == 'a'", {}, False),
("extra != 'a'", {}, True),
("extra == 'a' and extra == 'b'", {}, False),
("extra == 'a' or extra == 'b'", {}, False),
("extra != 'a' and extra != 'b'", {}, True),
("extra != 'a' or extra != 'b'", {}, True),
("extra != 'a' and extra == 'b'", {}, False),
("extra != 'a' or extra == 'b'", {}, True),
# multiple extras
("extra == 'a'", {"extra": ("a", "b")}, True),
("extra == 'a'", {"extra": ("b", "c")}, False),
("extra != 'a'", {"extra": ("a", "b")}, False),
("extra != 'a'", {"extra": ("b", "c")}, True),
("extra == 'a' and extra == 'b'", {"extra": ("a", "b",
"c")}, True),
("extra == 'a' and extra == 'b'", {"extra": ("a", "c")},
False),
("extra == 'a' or extra == 'b'", {"extra": ("a", "c")}, True),
("extra == 'a' or extra == 'b'", {"extra": ("b", "c")}, True),
("extra == 'a' or extra == 'b'", {"extra": ("c", "d")}, False),
("extra != 'a' and extra != 'b'", {"extra": ("a", "c")},
False),
("extra != 'a' and extra != 'b'", {"extra": ("b", "c")},
False),
("extra != 'a' and extra != 'b'", {"extra": ("c", "d")}, True),
("extra != 'a' or extra != 'b'", {"extra": ("a", "b",
"c")}, False),
("extra != 'a' or extra != 'b'", {"extra": ("a", "c")}, True),
("extra != 'a' or extra != 'b'", {"extra": ("b", "c")}, True),
("extra != 'a' and extra == 'b'", {"extra": ("a", "b")},
False),
("extra != 'a' and extra == 'b'", {"extra": ("b", "c")}, True),
("extra != 'a' and extra == 'b'", {"extra": ("c", "d")},
False),
("extra != 'a' or extra == 'b'", {"extra": ("a", "b")}, True),
("extra != 'a' or extra == 'b'", {"extra": ("c", "d")}, True),
("extra != 'a' or extra == 'b'", {"extra": ("a", "c")}, False),
],
)
def test_evaluate_extra(
marker_string: str, environment: dict[str, str | set[str]] |
None, expected: bool
) -> None:
m = parse_marker(marker_string)
> assert m.evaluate(environment) is expected
E assert False is True
E + where False = evaluate({'platform_release': '6.1-foobar'})
E + where evaluate = <MarkerExpression platform_release >=
"6">.evaluate
tests/marker/test_evaluation.py:151: AssertionError
__________________ test_arbitrary_unsupported[===abc->=1-and]
__________________
a = '===abc', b = '>=1', operand = 'and'
@pytest.mark.parametrize(
"a, b, operand",
[("===abc", ">=1", "and"), ("===1.0.0", "<1", "or"), ("===abc",
"==1.*", "or")],
)
def test_arbitrary_unsupported(a: str, b: str, operand: str) -> None:
> with pytest.raises(ValueError):
^^^^^^^^^^^^^^^^^^^^^^^^^
E Failed: DID NOT RAISE <class 'ValueError'>
tests/specifier/test_arbitrary.py:43: Failed
=========================== short test summary info
============================
FAILED
tests/marker/test_evaluation.py::test_evaluate_extra[platform_release >=
'6'-environment10-True]
FAILED
tests/specifier/test_arbitrary.py::test_arbitrary_unsupported[===abc->=1-and]
======================== 2 failed, 2477 passed in 1.38s
========================
E: pybuild pybuild:485: test: plugin pyproject failed with: exit code=1:
cd
/home/packages/tmp/o/dep-logic-0.5.2/.pybuild/cpython3_3.14_dep-logic/build;
python3.14 -m pytest tests
I: pybuild base:385: cd
/home/packages/tmp/o/dep-logic-0.5.2/.pybuild/cpython3_3.13_dep-logic/build;
python3.13 -m pytest tests
============================= test session starts
==============================
platform linux -- Python 3.13.12, pytest-9.0.2, pluggy-1.6.0
rootdir:
/home/packages/tmp/o/dep-logic-0.5.2/.pybuild/cpython3_3.13_dep-logic/build
configfile: pyproject.toml
plugins: typeguard-4.4.4, timeout-2.4.0, flaky-3.8.1, asyncio-1.3.0,
mock-3.15.1
asyncio: mode=Mode.STRICT, debug=False,
asyncio_default_fixture_loop_scope=None,
asyncio_default_test_loop_scope=function
collected 2479 items
tests/marker/test_common.py ............................
[ 1%]
tests/marker/test_compound.py ..........................................
[ 2%]
........................................................................
[ 5%]
.................................................
[ 7%]
tests/marker/test_evaluation.py .........................F..............
[ 9%]
.........................................
[ 10%]
tests/marker/test_expression.py ........................................
[ 12%]
...
[ 12%]
tests/marker/test_parsing.py ...........................................
[ 14%]
........................................................................
[ 17%]
........................................................................
[ 20%]
........................................................................
[ 23%]
........................................................................
[ 26%]
........................................................................
[ 28%]
........................................................................
[ 31%]
........................................................................
[ 34%]
........................................................................
[ 37%]
........................................................................
[ 40%]
........................................................................
[ 43%]
........................................................................
[ 46%]
........................................................................
[ 49%]
........................................................................
[ 52%]
........................................................................
[ 55%]
........................................................................
[ 58%]
........................................................................
[ 60%]
........................................................................
[ 63%]
........................................................................
[ 66%]
........................................................................
[ 69%]
........................................................................
[ 72%]
........................................................................
[ 75%]
........................................................................
[ 78%]
........................................................................
[ 81%]
........................................................................
[ 84%]
........................................................................
[ 87%]
........................................................................
[ 89%]
........................................................................
[ 92%]
...........................................................
[ 95%]
tests/specifier/test_arbitrary.py ..............F..
[ 95%]
tests/specifier/test_range.py ..........................................
[ 97%]
...................
[ 98%]
tests/specifier/test_union.py ...............
[ 98%]
tests/tags/test_platform.py ..............
[ 99%]
tests/tags/test_tags.py ...........
[100%]
=================================== FAILURES
===================================
_______ test_evaluate_extra[platform_release >= '6'-environment10-True]
________
marker_string = "platform_release >= '6'"
environment = {'platform_release': '6.1-foobar'}, expected = True
@pytest.mark.parametrize(
("marker_string", "environment", "expected"),
[
(f"os.name == '{os.name}'", None, True),
("sys.platform == 'win32'", {"sys_platform": "linux2"}, False),
("platform.version in 'Ubuntu'", {"platform_version":
"#39"}, False),
("platform.machine=='x86_64'", {"platform_machine":
"x86_64"}, True),
(
"platform.python_implementation=='Jython'",
{"platform_python_implementation": "CPython"},
False,
),
(
"python_version == '2.5' and
platform.python_implementation!= 'Jython'",
{"python_version": "2.7"},
False,
),
(
(
"platform_machine in 'x86_64 X86_64 aarch64 AARCH64
ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "foo"},
False,
),
(
(
"platform_machine in 'x86_64 X86_64 aarch64 AARCH64
ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "x86_64"},
True,
),
(
(
"platform_machine not in 'x86_64 X86_64 aarch64
AARCH64 ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "foo"},
True,
),
(
(
"platform_machine not in 'x86_64 X86_64 aarch64
AARCH64 ppc64le PPC64LE"
" amd64 AMD64 win32 WIN32'"
),
{"platform_machine": "x86_64"},
False,
),
("platform_release >= '6'", {"platform_release":
"6.1-foobar"}, True),
# extras
# single extra
("extra != 'security'", {"extra": "quux"}, True),
("extra != 'security'", {"extra": "security"}, False),
("extra != 'security'", {}, True),
("extra != 'security'", {"platform_machine": "x86_64"}, True),
# normalization
("extra == 'Security.1'", {"extra": "security-1"}, True),
("extra == 'a'", {}, False),
("extra != 'a'", {}, True),
("extra == 'a' and extra == 'b'", {}, False),
("extra == 'a' or extra == 'b'", {}, False),
("extra != 'a' and extra != 'b'", {}, True),
("extra != 'a' or extra != 'b'", {}, True),
("extra != 'a' and extra == 'b'", {}, False),
("extra != 'a' or extra == 'b'", {}, True),
# multiple extras
("extra == 'a'", {"extra": ("a", "b")}, True),
("extra == 'a'", {"extra": ("b", "c")}, False),
("extra != 'a'", {"extra": ("a", "b")}, False),
("extra != 'a'", {"extra": ("b", "c")}, True),
("extra == 'a' and extra == 'b'", {"extra": ("a", "b",
"c")}, True),
("extra == 'a' and extra == 'b'", {"extra": ("a", "c")},
False),
("extra == 'a' or extra == 'b'", {"extra": ("a", "c")}, True),
("extra == 'a' or extra == 'b'", {"extra": ("b", "c")}, True),
("extra == 'a' or extra == 'b'", {"extra": ("c", "d")}, False),
("extra != 'a' and extra != 'b'", {"extra": ("a", "c")},
False),
("extra != 'a' and extra != 'b'", {"extra": ("b", "c")},
False),
("extra != 'a' and extra != 'b'", {"extra": ("c", "d")}, True),
("extra != 'a' or extra != 'b'", {"extra": ("a", "b",
"c")}, False),
("extra != 'a' or extra != 'b'", {"extra": ("a", "c")}, True),
("extra != 'a' or extra != 'b'", {"extra": ("b", "c")}, True),
("extra != 'a' and extra == 'b'", {"extra": ("a", "b")},
False),
("extra != 'a' and extra == 'b'", {"extra": ("b", "c")}, True),
("extra != 'a' and extra == 'b'", {"extra": ("c", "d")},
False),
("extra != 'a' or extra == 'b'", {"extra": ("a", "b")}, True),
("extra != 'a' or extra == 'b'", {"extra": ("c", "d")}, True),
("extra != 'a' or extra == 'b'", {"extra": ("a", "c")}, False),
],
)
def test_evaluate_extra(
marker_string: str, environment: dict[str, str | set[str]] |
None, expected: bool
) -> None:
m = parse_marker(marker_string)
> assert m.evaluate(environment) is expected
E assert False is True
E + where False = evaluate({'platform_release': '6.1-foobar'})
E + where evaluate = <MarkerExpression platform_release >=
"6">.evaluate
tests/marker/test_evaluation.py:151: AssertionError
__________________ test_arbitrary_unsupported[===abc->=1-and]
__________________
a = '===abc', b = '>=1', operand = 'and'
@pytest.mark.parametrize(
"a, b, operand",
[("===abc", ">=1", "and"), ("===1.0.0", "<1", "or"), ("===abc",
"==1.*", "or")],
)
def test_arbitrary_unsupported(a: str, b: str, operand: str) -> None:
> with pytest.raises(ValueError):
^^^^^^^^^^^^^^^^^^^^^^^^^
E Failed: DID NOT RAISE <class 'ValueError'>
tests/specifier/test_arbitrary.py:43: Failed
=========================== short test summary info
============================
FAILED
tests/marker/test_evaluation.py::test_evaluate_extra[platform_release >=
'6'-environment10-True]
FAILED
tests/specifier/test_arbitrary.py::test_arbitrary_unsupported[===abc->=1-and]
======================== 2 failed, 2477 passed in 1.34s
========================
E: pybuild pybuild:485: test: plugin pyproject failed with: exit code=1:
cd
/home/packages/tmp/o/dep-logic-0.5.2/.pybuild/cpython3_3.13_dep-logic/build;
python3.13 -m pytest tests
dh_auto_test: error: pybuild --test --test-pytest -i python{version} -p
"3.14 3.13" --parallel=8 returned exit code 13
make: *** [debian/rules:6: binary] Error 25
dpkg-buildpackage: error: debian/rules binary subprocess failed with
exit status 2