mbrookhart commented on a change in pull request #7154:
URL: https://github.com/apache/tvm/pull/7154#discussion_r548220676



##########
File path: tests/python/frontend/pytorch/test_object_detection.py
##########
@@ -102,38 +105,55 @@ def test_detection_models():
     scripted_model = generate_jit_model(1)
     mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
 
-    with tvm.transform.PassContext(opt_level=3, 
disabled_pass=["FoldScaleAxis"]):
-        vm_exec = relay.vm.compile(mod, target=target, params=params)
+    def compile_and_run_vm(mod, params, data_np):
+        with tvm.transform.PassContext(opt_level=3, 
disabled_pass=["FoldScaleAxis"]):
+            vm_exec = relay.vm.compile(mod, target=target, params=params)
 
-    ctx = tvm.cpu()
-    vm = VirtualMachine(vm_exec, ctx)
-    data = process_image(img)
-    pt_res = scripted_model(data)
-    data = data.detach().numpy()
-    vm.set_input("main", **{input_name: data})
-    tvm_res = vm.run()
+        ctx = tvm.context(target, 0)
+        vm = VirtualMachine(vm_exec, ctx)
+        vm.set_input("main", **{input_name: data_np})
+        return vm.run()
 
+    data = process_image(img)
+    data_np = data.detach().numpy()
+    tvm_res = compile_and_run_vm(mod, params, data_np)
     # Note: due to accumulated numerical error, we can't directly compare 
results
     # with pytorch output. Some boxes might have a quite tiny difference in 
score
     # and the order can become different. We just measure how many valid boxes
     # there are for input image.
+    pt_res = scripted_model(data)
     pt_scores = pt_res[1].detach().numpy().tolist()
     tvm_scores = tvm_res[1].asnumpy().tolist()
-    num_pt_valid_scores = num_tvm_valid_scores = 0
 
-    for score in pt_scores:
-        if score >= score_threshold:
-            num_pt_valid_scores += 1
-        else:
-            break
+    def count_valid_scores(scores):
+        num_valid_scores = 0
+        for score in pt_scores:
+            if score >= score_threshold:
+                num_valid_scores += 1
+            else:
+                return num_valid_scores
 
-    for score in tvm_scores:
-        if score >= score_threshold:
-            num_tvm_valid_scores += 1
-        else:
-            break
+    num_pt_valid_scores = count_valid_scores(pt_scores)
+    num_tvm_valid_scores = count_valid_scores(tvm_scores)
 
     assert num_pt_valid_scores == num_tvm_valid_scores, (
         "Output mismatch: Under score threshold {}, Pytorch has {} valid "
         "boxes while TVM has {}.".format(score_threshold, num_pt_valid_scores, 
num_tvm_valid_scores)
     )
+
+    before = mod["main"]
+    after = rewrite(NMSRewrite(), before)
+    # TODO(masahi): Is there a better way to test if the desired rewrite has 
happened?

Review comment:
       I think this is fine for an integration test. If you want to write a 
unit test for your rewrite, I would suggest running it on a minimal example. I 
don't think it's strictly necessary.
   
   What would you imagine a search and not-rewrite should return? You could 
test the pattern search part by adding a throw into your callback and check 
that the raised error matches.

##########
File path: tests/python/frontend/pytorch/test_object_detection.py
##########
@@ -102,38 +105,55 @@ def test_detection_models():
     scripted_model = generate_jit_model(1)
     mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
 
-    with tvm.transform.PassContext(opt_level=3, 
disabled_pass=["FoldScaleAxis"]):
-        vm_exec = relay.vm.compile(mod, target=target, params=params)
+    def compile_and_run_vm(mod, params, data_np):
+        with tvm.transform.PassContext(opt_level=3, 
disabled_pass=["FoldScaleAxis"]):
+            vm_exec = relay.vm.compile(mod, target=target, params=params)
 
-    ctx = tvm.cpu()
-    vm = VirtualMachine(vm_exec, ctx)
-    data = process_image(img)
-    pt_res = scripted_model(data)
-    data = data.detach().numpy()
-    vm.set_input("main", **{input_name: data})
-    tvm_res = vm.run()
+        ctx = tvm.context(target, 0)
+        vm = VirtualMachine(vm_exec, ctx)
+        vm.set_input("main", **{input_name: data_np})
+        return vm.run()
 
+    data = process_image(img)
+    data_np = data.detach().numpy()
+    tvm_res = compile_and_run_vm(mod, params, data_np)
     # Note: due to accumulated numerical error, we can't directly compare 
results
     # with pytorch output. Some boxes might have a quite tiny difference in 
score
     # and the order can become different. We just measure how many valid boxes
     # there are for input image.
+    pt_res = scripted_model(data)
     pt_scores = pt_res[1].detach().numpy().tolist()
     tvm_scores = tvm_res[1].asnumpy().tolist()
-    num_pt_valid_scores = num_tvm_valid_scores = 0
 
-    for score in pt_scores:
-        if score >= score_threshold:
-            num_pt_valid_scores += 1
-        else:
-            break
+    def count_valid_scores(scores):
+        num_valid_scores = 0
+        for score in pt_scores:
+            if score >= score_threshold:
+                num_valid_scores += 1
+            else:
+                return num_valid_scores
 
-    for score in tvm_scores:
-        if score >= score_threshold:
-            num_tvm_valid_scores += 1
-        else:
-            break
+    num_pt_valid_scores = count_valid_scores(pt_scores)
+    num_tvm_valid_scores = count_valid_scores(tvm_scores)
 
     assert num_pt_valid_scores == num_tvm_valid_scores, (
         "Output mismatch: Under score threshold {}, Pytorch has {} valid "
         "boxes while TVM has {}.".format(score_threshold, num_pt_valid_scores, 
num_tvm_valid_scores)
     )
+
+    before = mod["main"]
+    after = rewrite(NMSRewrite(), before)
+    # TODO(masahi): Is there a better way to test if the desired rewrite has 
happened?

Review comment:
       I think this is fine for an integration test. If you want to write a 
unit test for your rewrite, I would suggest running it on a minimal example. I 
don't think it's strictly necessary.
   
   What would you imagine a search and not-rewrite should return? You could 
test the pattern search part by adding a throw into your callback and check 
that the raised error matches.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to