apeforest commented on a change in pull request #17449: Implemented large tensor flag for opperf testing
URL: https://github.com/apache/incubator-mxnet/pull/17449#discussion_r373707350
 
 

 ##########
 File path: benchmark/opperf/nd_operations/nn_activation_operators.py
 ##########
 @@ -55,55 +57,106 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=
     Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
 
     """
-    # Relu and its variation
-    relu_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "LeakyReLU")],
-                                              run_backward=True,
-                                              dtype=dtype,
-                                              ctx=ctx,
-                                              profiler=profiler,
-                                              inputs=[{"data": (1024, 1024), "act_type": "leaky", "slope": 0.1},
-                                                      {"data": (10000, 1), "act_type": "leaky", "slope": 0.1},
-                                                      {"data": (10000, 100), "act_type": "leaky", "slope": 0.1},
-                                                      {"data": (1024, 1024), "act_type": "elu", "slope": 0.1},
-                                                      {"data": (10000, 1), "act_type": "elu", "slope": 0.1},
-                                                      {"data": (10000, 100), "act_type": "elu", "slope": 0.1},
-                                                      {"data": (1024, 1024), "act_type": "selu"},
-                                                      {"data": (10000, 1), "act_type": "selu"},
-                                                      {"data": (10000, 100), "act_type": "selu"},
-                                                      {"data": (1024, 1024), "act_type": "prelu", "gamma": (1, 1024)},
-                                                      {"data": (10000, 1), "act_type": "prelu", "gamma": (1, 1)},
-                                                      {"data": (10000, 100), "act_type": "prelu", "gamma": (1, 100)}
-                                                      ],
-                                              warmup=warmup,
-                                              runs=runs)
+    if large_tensor == 'on':
+        # Relu and its variation
+        relu_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "LeakyReLU")],
+                                                  run_backward=True,
+                                                  dtype=dtype,
+                                                  ctx=ctx,
+                                                  profiler=profiler,
+                                                  inputs=[{"data": (2**16, 2**16), "act_type": "leaky", "slope": 0.1},
+                                                          {"data": (2**4, 2**28), "act_type": "leaky", "slope": 0.1},
+                                                          {"data": (4, 2**30), "act_type": "leaky", "slope": 0.1},
+                                                          {"data": (2**16, 2**16), "act_type": "elu", "slope": 0.1},
+                                                          {"data": (2**4, 2**28), "act_type": "elu", "slope": 0.1},
+                                                          {"data": (4, 2**30), "act_type": "elu", "slope": 0.1},
+                                                          {"data": (2**16, 2**16), "act_type": "selu"},
+                                                          {"data": (2**4, 2**28), "act_type": "selu"},
+                                                          {"data": (4, 2**30), "act_type": "selu"},
+                                                          {"data": (2**16, 2**16), "act_type": "prelu", "gamma": (1, 2**16)},
+                                                          {"data": (2**4, 2**28), "act_type": "prelu", "gamma": (1, 2**28)},
+                                                          {"data": (4, 2**30), "act_type": "prelu", "gamma": (1, 2**30)}
+                                                         ],
+                                                  warmup=warmup,
+                                                  runs=runs)
 
-    # Sigmoid => Covered as part of Unary ops
-    # Hard_Sigmoid
-    hard_sigmoid_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "hard_sigmoid")],
-                                                      run_backward=True,
-                                                      dtype=dtype,
-                                                      ctx=ctx,
-                                                      profiler=profiler,
-                                                      inputs=[{"data": (1024, 1024), "alpha": 0.25, "beta": 0.5},
-                                                              {"data": (10000, 1), "alpha": 0.25, "beta": 0.5},
-                                                              {"data": (10000, 100), "alpha": 0.25, "beta": 0.5}
-                                                              ],
-                                                      warmup=warmup,
-                                                      runs=runs)
+        # Sigmoid => Covered as part of Unary ops
+        # Hard_Sigmoid
+        hard_sigmoid_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "hard_sigmoid")],
+                                                          run_backward=True,
+                                                          dtype=dtype,
+                                                          ctx=ctx,
+                                                          profiler=profiler,
+                                                          inputs=[{"data": (2**16, 2**16), "alpha": 0.25, "beta": 0.5},
+                                                                  {"data": (2**4, 2**28), "alpha": 0.25, "beta": 0.5},
+                                                                  {"data": (4, 2**30), "alpha": 0.25, "beta": 0.5}
+                                                                 ],
+                                                          warmup=warmup,
+                                                          runs=runs)
 
-    # Softmax, LogSoftmax
-    softmax_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "softmax"),
-                                                  getattr(MX_OP_MODULE, "log_softmax")],
-                                                 run_backward=True,
-                                                 dtype=dtype,
-                                                 ctx=ctx,
-                                                 profiler=profiler,
-                                                 inputs=[{"data": (1024, 1024), "axis": -1, "temperature": 0.5},
-                                                         {"data": (10000, 1), "axis": -1, "temperature": 0.5},
-                                                         {"data": (10000, 100), "axis": -1, "temperature": 0.5}
+        # Softmax, LogSoftmax
+        softmax_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "softmax"),
+                                                      getattr(MX_OP_MODULE, "log_softmax")],
+                                                     run_backward=True,
+                                                     dtype=dtype,
+                                                     ctx=ctx,
+                                                     profiler=profiler,
+                                                     inputs=[{"data": (2**16, 2**16), "axis": -1, "temperature": 0.5},
+                                                             {"data": (2**4, 2**28), "axis": -1, "temperature": 0.5},
+                                                             {"data": (4, 2**30), "axis": -1, "temperature": 0.5}
+                                                            ],
+                                                     warmup=warmup,
+                                                     runs=runs)
+    else:
 
 Review comment:
  It seems the only difference between the if and else branches is the `inputs` argument. Could we generate just the different `inputs` in the if/else branch and then pass them to a single call of the same operator function?
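
  A minimal sketch of that refactor, for illustration only (`large_tensor`, `run_performance_test`, `MX_OP_MODULE`, `ctx`, `dtype`, `profiler`, `warmup`, and `runs` are assumed to be in scope from the surrounding function in this PR; shapes are copied from the diff above):

  ```python
  # Hypothetical refactor sketch: choose the input shapes in the branch,
  # then benchmark through a single call site per operator.
  if large_tensor == 'on':
      relu_inputs = [{"data": (2**16, 2**16), "act_type": "leaky", "slope": 0.1},
                     {"data": (2**4, 2**28), "act_type": "leaky", "slope": 0.1},
                     {"data": (4, 2**30), "act_type": "leaky", "slope": 0.1}]
      # ... elu/selu/prelu entries would follow the same pattern
  else:
      relu_inputs = [{"data": (1024, 1024), "act_type": "leaky", "slope": 0.1},
                     {"data": (10000, 1), "act_type": "leaky", "slope": 0.1},
                     {"data": (10000, 100), "act_type": "leaky", "slope": 0.1}]
      # ... elu/selu/prelu entries would follow the same pattern

  # Relu and its variation: one call regardless of the flag.
  relu_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "LeakyReLU")],
                                            run_backward=True,
                                            dtype=dtype,
                                            ctx=ctx,
                                            profiler=profiler,
                                            inputs=relu_inputs,
                                            warmup=warmup,
                                            runs=runs)
  ```

  The same pattern would also collapse the duplicated `hard_sigmoid` and `softmax`/`log_softmax` calls.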

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services