xiaosibai commented on code in PR #14423:
URL: https://github.com/apache/tvm/pull/14423#discussion_r1161512466
##########
python/tvm/relay/frontend/tflite.py:
##########
@@ -2959,6 +2977,138 @@ def convert_batch_to_space_nd(self, op):
return out
+ def convert_batch_matmul(self, op):
+ """batch_matmul implementation."""
+ try:
+ from tflite.BatchMatMulOptions import BatchMatMulOptions
+ except ImportError:
+ raise ImportError("The tflite package must be installed")
+
+ input_tensors = self.get_input_tensors(op)
+ output_tensor = self.get_output_tensors(op)
+
+ assert len(input_tensors) == 2, "two input tensor arguments expected"
+
+ batch_matmul_options = BatchMatMulOptions()
+ op_options = op.BuiltinOptions()
+ batch_matmul_options.Init(op_options.Bytes, op_options.Pos)
+
+ input_a = self.get_expr(input_tensors[0].tensor_idx)
+ input_b = self.get_expr(input_tensors[1].tensor_idx)
+
+ shape_a = shape_of(input_a)
+ shape_b = shape_of(input_b)
+ rank_a = _infer_shape(shape_a)[0]
+ rank_b = _infer_shape(shape_b)[0]
+
+ if rank_a > 2 or rank_b > 2:
+ # Determine the output batch dimension
+ new_a_shape = shape_a
+ new_b_shape = shape_b
+ if rank_a > rank_b:
+ rank_diff = rank_a - rank_b
+ new_b_shape = _op.concatenate(
+ [
+ _expr.const([1] * rank_diff, dtype=_infer_type(b_shape).checked_type.dtype),
+ shape_b,
+ ],
+ 0,
+ )
+ elif rank_a < rank_b:
+ rank_diff = rank_b - rank_a
+ new_a_shape = _op.concatenate(
+ [
+ _expr.const([1] * rank_diff, dtype=_infer_type(a_shape).checked_type.dtype),
+ shape_a,
+ ],
+ 0,
+ )
+ else:
+ pass
+
+ out_batch = _op.concatenate(
+ [
+ _op.maximum(
+ _op.strided_slice(new_b_shape, [i], [i + 1]),
+ _op.strided_slice(new_a_shape, [i], [i + 1]),
+ )
+ for i in range(max(rank_a, rank_b) - 2)
+ ],
+ 0,
+ )
+
+ out_batch_shape = _fold_constant(out_batch)
+
+ a_broadcasted_shape = _fold_constant(
+ _op.concatenate(
+ [
+ out_batch,
+ _op.strided_slice(shape_a, [rank_a - 2], [rank_a]),
+ ],
+ 0,
+ )
+ )
+ b_broadcasted_shape = _fold_constant(
+ _op.concatenate(
+ [
+ out_batch,
+ _op.strided_slice(shape_b, [rank_b - 2], [rank_b]),
+ ],
+ 0,
+ )
+ )
+ if not tvm.ir.structural_equal(shape_a, a_broadcasted_shape):
+ input_a = _op.transform.broadcast_to(a, a_broadcasted_shape)
+ if not tvm.ir.structural_equal(shape_b, b_broadcasted_shape):
+ input_b = _op.transform.broadcast_to(b, b_broadcasted_shape)
Review Comment:
The variable "b" is not defined — should this be `input_b`?
##########
python/tvm/relay/frontend/tflite.py:
##########
@@ -2959,6 +2977,138 @@ def convert_batch_to_space_nd(self, op):
return out
+ def convert_batch_matmul(self, op):
+ """batch_matmul implementation."""
+ try:
+ from tflite.BatchMatMulOptions import BatchMatMulOptions
+ except ImportError:
+ raise ImportError("The tflite package must be installed")
+
+ input_tensors = self.get_input_tensors(op)
+ output_tensor = self.get_output_tensors(op)
+
+ assert len(input_tensors) == 2, "two input tensor arguments expected"
+
+ batch_matmul_options = BatchMatMulOptions()
+ op_options = op.BuiltinOptions()
+ batch_matmul_options.Init(op_options.Bytes, op_options.Pos)
+
+ input_a = self.get_expr(input_tensors[0].tensor_idx)
+ input_b = self.get_expr(input_tensors[1].tensor_idx)
+
+ shape_a = shape_of(input_a)
+ shape_b = shape_of(input_b)
+ rank_a = _infer_shape(shape_a)[0]
+ rank_b = _infer_shape(shape_b)[0]
+
+ if rank_a > 2 or rank_b > 2:
+ # Determine the output batch dimension
+ new_a_shape = shape_a
+ new_b_shape = shape_b
+ if rank_a > rank_b:
+ rank_diff = rank_a - rank_b
+ new_b_shape = _op.concatenate(
+ [
+ _expr.const([1] * rank_diff, dtype=_infer_type(b_shape).checked_type.dtype),
+ shape_b,
+ ],
+ 0,
+ )
+ elif rank_a < rank_b:
+ rank_diff = rank_b - rank_a
+ new_a_shape = _op.concatenate(
+ [
+ _expr.const([1] * rank_diff, dtype=_infer_type(a_shape).checked_type.dtype),
+ shape_a,
+ ],
+ 0,
+ )
+ else:
+ pass
+
+ out_batch = _op.concatenate(
+ [
+ _op.maximum(
+ _op.strided_slice(new_b_shape, [i], [i + 1]),
+ _op.strided_slice(new_a_shape, [i], [i + 1]),
+ )
+ for i in range(max(rank_a, rank_b) - 2)
+ ],
+ 0,
+ )
+
+ out_batch_shape = _fold_constant(out_batch)
+
+ a_broadcasted_shape = _fold_constant(
+ _op.concatenate(
+ [
+ out_batch,
+ _op.strided_slice(shape_a, [rank_a - 2], [rank_a]),
+ ],
+ 0,
+ )
+ )
+ b_broadcasted_shape = _fold_constant(
+ _op.concatenate(
+ [
+ out_batch,
+ _op.strided_slice(shape_b, [rank_b - 2], [rank_b]),
+ ],
+ 0,
+ )
+ )
+ if not tvm.ir.structural_equal(shape_a, a_broadcasted_shape):
+ input_a = _op.transform.broadcast_to(a, a_broadcasted_shape)
Review Comment:
The variable "a" is not defined — should this be `input_a`?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]