apeforest commented on a change in pull request #17449: Implemented large tensor flag for opperf testing URL: https://github.com/apache/incubator-mxnet/pull/17449#discussion_r373707375
##########
File path: benchmark/opperf/nd_operations/nn_basic_operators.py
##########
@@ -29,58 +29,132 @@
"""
-def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
profiler='native', warmup=25, runs=100):
-    # FullyConnected operator benchmarks
- fc_benchmark_res = run_performance_test([getattr(MX_OP_MODULE,
"FullyConnected")],
- run_backward=True,
- dtype=dtype,
- ctx=ctx,
- profiler=profiler,
- inputs=[{"data": (32, 3, 256, 256),
- "num_hidden": 64,
- "weight": (64, 3 * 256 *
256),
- "bias": (64,),
- "flatten": True},
- {"data": (32, 3, 256, 256),
- "num_hidden": 64,
- "weight": (64, 256),
- "bias": (64,),
- "flatten": False}],
- warmup=warmup,
- runs=runs)
+def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
profiler='native', large_tensor='off', warmup=25, runs=100):
+ """Runs benchmarks with the given context, precision (dtype), and data
size (large_tensor) for all the basic neural network
+ operators in MXNet.
- # Dropout benchmarks
- dropout_benchmark_res = run_performance_test([getattr(MX_OP_MODULE,
"Dropout")],
- run_backward=True,
- dtype=dtype,
- ctx=ctx,
- profiler=profiler,
- inputs=[{"data": (32, 3, 256,
256),
- "p": 0.5,
- "mode": "always"},
- {"data": (10000, 10),
- "p": 0.5,
- "mode": "always"}],
- warmup=warmup,
- runs=runs)
- # BatchNorm benchmarks
- batchnorm_benchmark_res = run_performance_test([getattr(MX_OP_MODULE,
"BatchNorm")],
- run_backward=True,
- dtype=dtype,
- ctx=ctx,
- profiler=profiler,
- inputs=[{"data": (32, 3,
256, 256),
- "gamma": (3,),
- "beta": (3,),
- "moving_mean":
(3,),
- "moving_var":
(3,)},
- {"data": (32, 3,
10000, 10),
- "gamma": (3,),
- "beta": (3,),
- "moving_mean":
(3,),
- "moving_var":
(3,)}],
- warmup=warmup,
- runs=runs)
+ Parameters
+ ----------
+ ctx: mx.ctx
+ Context to run benchmarks
+ dtype: str, default 'float32'
+ Precision to use for benchmarks
+ large_tensor: str, default 'off'
+ Tensor size to use for tests
+ warmup: int, default 25
+ Number of times to run for warmup
+ runs: int, default 100
+ Number of runs to capture benchmark results
+
+ Returns
+ -------
+ Dictionary of results. Key -> Name of the operator, Value -> Benchmark
results.
+
+ """
+ if large_tensor == 'on':
+        # FullyConnected operator benchmarks
+ fc_benchmark_res = run_performance_test([getattr(MX_OP_MODULE,
"FullyConnected")],
+ run_backward=True,
+ dtype=dtype,
+ ctx=ctx,
+ profiler=profiler,
+ inputs=[{"data": (2**15, 3,
256, 256),
+ "num_hidden": 64,
+ "weight": (64, 3 *
256 * 256),
+ "bias": (64,),
+ "flatten": True},
+ {"data": (2**17, 3,
128, 128),
+ "num_hidden": 64,
+ "weight": (64, 3 *
128 * 128),
+ "bias": (64,),
+ "flatten": False}],
+ warmup=warmup,
+ runs=runs)
+
+ # Dropout benchmarks
+ dropout_benchmark_res = run_performance_test([getattr(MX_OP_MODULE,
"Dropout")],
+ run_backward=True,
+ dtype=dtype,
+ ctx=ctx,
+ profiler=profiler,
+ inputs=[{"data": (2**15,
3, 256, 256),
+ "p": 0.5,
+ "mode":
"always"},
+ {"data": (2**28,
16),
+ "p": 0.5,
+ "mode":
"always"}],
+ warmup=warmup,
+ runs=runs)
+ # BatchNorm benchmarks
+ batchnorm_benchmark_res = run_performance_test([getattr(MX_OP_MODULE,
"BatchNorm")],
+ run_backward=True,
+ dtype=dtype,
+ ctx=ctx,
+ profiler=profiler,
+ inputs=[{"data":
(2**15, 3, 256, 256),
+ "gamma": (3,),
+ "beta": (3,),
+ "moving_mean":
(3,),
+ "moving_var":
(3,)},
+ {"data":
(2**14, 3, 10000, 10),
+ "gamma": (3,),
+ "beta": (3,),
+ "moving_mean":
(3,),
+ "moving_var":
(3,)}],
+ warmup=warmup,
+ runs=runs)
+ else:
Review comment:
It seems the only difference between the `if` and `else` branches is the `inputs`
argument. Could we build just the `inputs` list inside the if/else branch, and then
make a single call to each operator's `run_performance_test` with it?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
