Caenorst commented on a change in pull request #16408: Add MXNet Ops for fast
multihead attention
URL: https://github.com/apache/incubator-mxnet/pull/16408#discussion_r341173650
##########
File path: tests/python/gpu/test_operator_gpu.py
##########
@@ -2493,13 +2493,334 @@ def test_arange_like_dtype():
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
-
+
mod = z.simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
    mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
+@with_seed()
+def check_multihead_attention_selfatt(bwd_ignore_zero_init):
+ def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
+ q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
+ k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
+ v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
+ all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
+ all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
+ return all_weights
+
+ def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
+ q_bias = F.reshape(q_bias, shape=(num_heads, -1))
+ k_bias = F.reshape(k_bias, shape=(num_heads, -1))
+ v_bias = F.reshape(v_bias, shape=(num_heads, -1))
+ all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
+ all_bias = F.reshape(all_bias, shape=(-1,))
+ return all_bias
+
+ dtype='float16'
Review comment:
I just added a test for fp32 (the ops support fp16 and fp32), and a warning on
bwd_ignore_zero_init to avoid usage without the kAddTo functionality.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services