anijain2305 commented on issue #4878: [Relay][SimplifyInference] Express 
Softmax as sequence of Relay ops
URL: https://github.com/apache/incubator-tvm/pull/4878#issuecomment-586184247
 
 
   In case anybody wants to run this on their end
   
   ~~~
   
   import tvm
   from tvm import relay
   import numpy as np
   from tvm.contrib import graph_runtime
   from tvm.contrib.debugger import debug_runtime
   import time
   import os
   
   def test(dshape, axis_):
       dtype = 'float32'
       x = relay.var('x', shape=dshape, dtype=dtype)
       y = relay.nn.softmax(x, axis=axis_)
       func = relay.Function([x], y)
       mod = tvm.relay.Module.from_expr(func)
   
       np.random.seed(0)
       data = np.random.uniform(size=dshape)
   
       def run(graph, lib):
           ctx = tvm.gpu()
           module = graph_runtime.create(graph, lib, ctx)
           # module = debug_runtime.create(graph, lib, ctx)
           module.set_input('x', tvm.nd.array(data.astype(dtype)))
   
           for i in range(0, 50):
               module.run()
           time1 = time.time()
   
           num_iters = 100
           for i in range(0, num_iters):
               module.run()
           time2 = time.time()
           return (time2 - time1)/num_iters * 1000
   
       with relay.build_config(opt_level=3, 
disabled_pass=["SimplifyInference"]):
           graph_master, lib_master, params = relay.build(mod, target="cuda")
   
       with relay.build_config(opt_level=3):
           graph_new, lib_new, params = relay.build(mod, target="cuda")
   
       master = run(graph_master, lib_master)
       new_pr = run(graph_new, lib_new)
       print(master, new_pr, master/new_pr, sep='\t', end='\t')
   
   # Soiferj
   test((1, 12, 128, 128), -1)
   
   # Masahi
   test((1, 16, 256, 256), 1)
   
   # Animesh - VGG SSD
   test((1, 21, 8732), 1)
   print()
   ~~~

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to