ptrendx commented on a change in pull request #19036:
URL: https://github.com/apache/incubator-mxnet/pull/19036#discussion_r498465581



##########
File path: tests/python/gpu/test_contrib_amp.py
##########
@@ -66,45 +66,39 @@ def test_amp_coverage(amp_tests):
     assert ret == [], "Elements " + str(ret) + " exist in more than 1 AMP list."
 
     # Check the coverage
-    py_str = lambda x: x.decode('utf-8')
-
-    plist = ctypes.POINTER(ctypes.c_char_p)()
-    size = ctypes.c_uint()
-
-    mx.base._LIB.MXListAllOpNames(ctypes.byref(size),
-                                     ctypes.byref(plist))
-    op_names = []
-    for i in range(size.value):
-        s = py_str(plist[i])
-        if not s.startswith("_backward") \
-           and not s.startswith("_contrib_backward_"):
-            op_names.append(s)
-
-    ret1 = set(op_names) - set(t)
-
-    if ret1 != set():
-        warnings.warn("Operators " + str(ret1) + " do not exist in AMP lists (in "
-                       "python/mxnet/contrib/amp/lists/symbol_fp16.py) - please add them. "
-                       """Please follow these guidelines for choosing a proper list:
-                       - if your operator is not to be used in a computational graph
-                         (e.g. image manipulation operators, optimizers) or does not have
-                         inputs, put it in FP16_FP32_FUNCS list,
-                       - if your operator requires FP32 inputs or is not safe to use with lower
-                         precision, put it in FP32_FUNCS list,
-                       - if your operator supports both FP32 and lower precision, has
-                         multiple inputs and expects all inputs to be of the same
-                         type, put it in WIDEST_TYPE_CASTS list,
-                       - if your operator supports both FP32 and lower precision and has
-                         either a single input or supports inputs of different type,
-                         put it in FP16_FP32_FUNCS list,
-                       - if your operator is both safe to use in lower precision and
-                         it is highly beneficial to use it in lower precision, then
-                         put it in FP16_FUNCS (this is unlikely for new operators)
-                       - If you are not sure which list to choose, FP32_FUNCS is the
-                         safest option""")
+    covered = set(t)
+    ops = get_all_registered_operators_grouped()
+    required = set(k for k in ops
+                   if not k.startswith(("_backward", "_contrib_backward", "_npi_backward")) and
+                   not k.endswith("_backward"))
+
+    extra = covered - required
+    assert not extra, f"{len(extra)} operators are not needed in the AMP lists: {sorted(extra)}"
+
+    guidelines = """Please follow these guidelines for choosing a proper list:
+    - if your operator is not to be used in a computational graph
+      (e.g. image manipulation operators, optimizers) or does not have
+      inputs, put it in FP16_FP32_FUNCS list,
+    - if your operator requires FP32 inputs or is not safe to use with lower
+      precision, put it in FP32_FUNCS list,
+    - if your operator supports both FP32 and lower precision, has
+      multiple inputs and expects all inputs to be of the same
+      type, put it in WIDEST_TYPE_CASTS list,
+    - if your operator supports both FP32 and lower precision and has
+      either a single input or supports inputs of different type,
+      put it in FP16_FP32_FUNCS list,
+    - if your operator is both safe to use in lower precision and
+      it is highly beneficial to use it in lower precision, then
+      put it in FP16_FUNCS (this is unlikely for new operators)
+    - If you are not sure which list to choose, FP32_FUNCS is the
+      safest option"""
+    diff = required - covered
+    assert not diff, f"{len(diff)} operators {sorted(diff)} do not exist in AMP lists (in " \
+        f"python/mxnet/contrib/amp/lists/symbol_fp16.py) - please add them. " \
+        f"\n{guidelines}"
 
 @with_seed()
-@pytest.mark.skip(reason='Error during waitall(). Tracked in #18099')
+@pytest.mark.skip(reason="WIP")

Review comment:
       ?
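
For context on the rewritten check above: it reduces to two set comparisons between the registered operator names and the union of the AMP lists. A minimal, self-contained sketch of that pattern in Python, with hypothetical operator names standing in for the output of get_all_registered_operators_grouped() and for the real AMP lists:

    # Hypothetical stand-ins for the registered operators and the AMP lists.
    registered_ops = {"Convolution", "FullyConnected",
                      "_backward_Convolution", "_npi_backward_broadcast_add"}
    amp_listed_ops = {"Convolution", "FullyConnected"}

    # Backward operators are implementation details, so the coverage check
    # filters them out, mirroring the startswith/endswith filter in the diff.
    required = {op for op in registered_ops
                if not op.startswith(("_backward", "_contrib_backward", "_npi_backward"))
                and not op.endswith("_backward")}

    extra = amp_listed_ops - required    # listed, but no such forward operator
    missing = required - amp_listed_ops  # registered, but absent from the AMP lists

    assert not extra, f"stale AMP list entries: {sorted(extra)}"
    assert not missing, f"operators missing from the AMP lists: {sorted(missing)}"

Splitting the comparison into extra and missing lets the assertions report stale AMP entries separately from uncovered operators; the previous ctypes-based version only warned about the missing direction.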



