comaniac commented on a change in pull request #10122:
URL: https://github.com/apache/tvm/pull/10122#discussion_r796180230
##########
File path: tests/python/relay/test_op_level1.py
##########
@@ -65,18 +65,16 @@ class TestUnaryOp:
def test_unary_op(self, target, dev, relay_op, ref_func, dtype):
target = tvm.target.Target(target)
- if (
- dtype == "float16"
- and target.kind.name == "cuda"
- and not have_fp16(tvm.cuda(0).compute_version)
- ):
- pytest.xfail("No float16 support on local cuda device")
- elif (
- dtype == "float16"
- and target.kind.name == "cuda"
- and not target.attrs.get("supports_float16", False)
- ):
- pytest.xfail("No float16 support on vulkan target")
+ if dtype == "float16":
+ if target.kind.name == "cuda":
+ if not have_fp16(tvm.cuda(0).compute_version):
+ pytest.xfail(
+                    "No float16 support on local cuda device (compute_version != 5.3 and < 6.0)"
+ )
+            elif target.kind.name == "vulkan" and not target.attrs.get("supports_float16", False):
+                pytest.xfail("No float16 support on vulkan target (supports_float16=False)")
+ else:
+                pytest.xfail(f"No float16 support on {target.kind.name} target")
Review comment:
   I'm not sure which targets could support float16 other than CUDA. At
least LLVM does not. Please let me know if there's any other target we
should include.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]