alexwong commented on a change in pull request #5272: [BYOC] Add example of Composite + Annotate for DNNL fused op
URL: https://github.com/apache/incubator-tvm/pull/5272#discussion_r405782682
##########
File path: tests/python/relay/test_pass_partition_graph.py
##########
@@ -856,6 +857,111 @@ def expected():
     partitioned = transform.PartitionGraph()(mod)
     assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
+
+def test_partition_conv_bias_relu():
+    def make_pattern():
+        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
+        weight = relay.var("weight")
+        bias = relay.var("bias")
+        conv = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3),
+                               channels=8, padding=(1, 1))
+        add = relay.add(conv, bias)
+        return relay.nn.relu(add)
+
+    def get_blocks(prefix, data, in_channel, out_channel,
+                   include_bn=True, include_sigmoid=False):
+        weight = relay.var(prefix + "weight")
+        bn_gamma = relay.var(prefix + "bn_gamma")
+        bn_beta = relay.var(prefix + "bn_beta")
+        bn_mmean = relay.var(prefix + "bn_mean")
+        bn_mvar = relay.var(prefix + "bn_var")
+
+        layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3),
+                                channels=out_channel, padding=(1, 1))
+        if include_bn:
+            bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta,
+                                            bn_mmean, bn_mvar)
+            layer = bn_output[0]
+        if include_sigmoid:
+            # dummy layer to prevent pattern detection
+            layer = relay.sigmoid(layer)
+        layer = relay.nn.relu(layer)
+        return layer
+
+    def get_net(include_bn=True, include_sigmoid=False):
+        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
+        layer1 = get_blocks("layer1_", data, 3, 8, include_bn, include_sigmoid)
+        layer2 = get_blocks("layer2_", layer1, 8, 8, include_bn, include_sigmoid)
+        return relay.Function(relay.analysis.free_vars(layer2), layer2)
+
+    def get_partitioned_mod(mod, params):
+        # This is required for constant folding
+        mod["main"] = bind_params_by_name(mod["main"], params)
+        pattern_table = [
+            ("dnnl.conv_bias_relu", make_pattern())
+        ]
+        remove_bn_pass = transform.Sequential([
+            transform.InferType(),
+            transform.SimplifyInference(),
+            transform.FoldConstant(),
+            transform.FoldScaleAxis(),
+        ])
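+        # Simplify batch_norm away first so the conv+bias+relu pattern is
+        # visible to MergeComposite, then annotate the composites for DNNL.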
+        composite_partition = transform.Sequential([
+            remove_bn_pass,
+            transform.MergeComposite(pattern_table),
+            transform.AnnotateTarget("dnnl"),
Review comment:
> @alexwong Could you give some more detail on the issue you're facing? I'd be happy to take a look.
An example: I have a very simple Relay graph where conv2d and clip are supported ops, while squeeze and reshape are supported only when they appear as part of a squeeze->reshape pattern, so I am using MergeComposite for that.
```
def @main(%data: Tensor[(1, 3, 224, 224), float32], %weight1: Tensor[(32, 3, 3, 3), float32]) -> Tensor[(3, 3, 3, 32), float32] {
  %0 = nn.conv2d(%data, %weight1, padding=[1, 1, 1, 1], kernel_size=[3, 3]) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %1 = clip(%0, a_min=0f, a_max=6f) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %2 = squeeze(%1) /* ty=Tensor[(32, 224, 224), float32] */;
  reshape(%2, newshape=[3, 3, 3, 32]) /* ty=Tensor[(3, 3, 3, 32), float32] */
}
```
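For reference, this is roughly how that setup looks (a minimal sketch: the helper name `make_squeeze_reshape_pattern` is mine, `compilername` is a placeholder target, and the op registrations marking conv2d/clip as supported for it are elided):
```python
# Sketch of the MergeComposite setup for the squeeze->reshape pattern.
# "compilername" is a placeholder; registering nn.conv2d and clip as
# supported ops for that target is elided here.
from tvm import relay
from tvm.relay import transform

def make_squeeze_reshape_pattern():
    # Match squeeze followed by reshape as one composite call.
    x = relay.var("x")
    return relay.reshape(relay.squeeze(x), newshape=[3, 3, 3, 32])

pattern_table = [
    ("compilername.squeeze", make_squeeze_reshape_pattern())
]

annotate = transform.Sequential([
    transform.MergeComposite(pattern_table),
    transform.AnnotateTarget("compilername"),
])
```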
After annotation I get:
```
def @main(%data: Tensor[(1, 3, 224, 224), float32], %weight1: Tensor[(32, 3, 3, 3), float32]) -> Tensor[(3, 3, 3, 32), float32] {
  %0 = annotation.compiler_begin(%data, meta[relay.attrs.CompilerAttrs][0]) /* ty=Tensor[(1, 3, 224, 224), float32] */;
  %1 = annotation.compiler_begin(%weight1, meta[relay.attrs.CompilerAttrs][1]) /* ty=Tensor[(32, 3, 3, 3), float32] */;
  %2 = nn.conv2d(%0, %1, padding=[1, 1, 1, 1], kernel_size=[3, 3]) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %3 = annotation.compiler_end(%2, meta[relay.attrs.CompilerAttrs][2]) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %4 = annotation.compiler_begin(%3, meta[relay.attrs.CompilerAttrs][3]) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %5 = clip(%4, a_min=0f, a_max=6f) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %6 = annotation.compiler_end(%5, meta[relay.attrs.CompilerAttrs][4]) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %7 = annotation.compiler_begin(%6, meta[relay.attrs.CompilerAttrs][5]) /* ty=Tensor[(1, 32, 224, 224), float32] */;
  %9 = fn (%x: Tensor[(1, 32, 224, 224), float32], Composite="compilername.squeeze") -> Tensor[(3, 3, 3, 32), float32] {
    %8 = squeeze(%x) /* ty=Tensor[(32, 224, 224), float32] */;
    reshape(%8, newshape=[3, 3, 3, 32]) /* ty=Tensor[(3, 3, 3, 32), float32] */
  };
  %10 = %9(%7) /* ty=Tensor[(3, 3, 3, 32), float32] */;
  annotation.compiler_end(%10, meta[relay.attrs.CompilerAttrs][6]) /* ty=Tensor[(3, 3, 3, 32), float32] */
}
```
This looks right to me, but MergeCompilerRegions throws the following error:
```
tvm._ffi.base.TVMError: TVMError: Cannot find the corresponding region for start annotation:
v0.0.4
free_var %x: Tensor[(1, 32, 224, 224), float32]
%0 = annotation.compiler_begin(%x, meta[relay.attrs.CompilerAttrs][0]);
%1 = squeeze(%0);
%2 = annotation.compiler_end(%1, meta[relay.attrs.CompilerAttrs][1]);
annotation.compiler_begin(%2, meta[relay.attrs.CompilerAttrs][2])
// meta data omitted. you can use show_meta_data=True to include meta data
```
I'm not sure whether MergeCompilerRegions is expected to handle composite functions, but I think there may be some missing logic there.
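For completeness, a self-contained repro sketch (the graph matches the one above; it assumes the `pattern_table` from the earlier sketch and the elided op registrations for the placeholder target):
```python
# Repro sketch for the MergeCompilerRegions error, using the same graph
# as above. Assumes pattern_table from the earlier sketch and that
# nn.conv2d and clip are registered as supported for "compilername".
import tvm
from tvm import relay
from tvm.relay import transform

data = relay.var("data", shape=(1, 3, 224, 224))
weight1 = relay.var("weight1", shape=(32, 3, 3, 3))
conv = relay.nn.conv2d(data, weight1, padding=(1, 1, 1, 1), kernel_size=(3, 3))
clipped = relay.clip(conv, a_min=0.0, a_max=6.0)
out = relay.reshape(relay.squeeze(clipped), newshape=[3, 3, 3, 32])

mod = tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(out), out))
mod = transform.InferType()(mod)
mod = transform.MergeComposite(pattern_table)(mod)
mod = transform.AnnotateTarget("compilername")(mod)
mod = transform.MergeCompilerRegions()(mod)  # raises the TVMError above
```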