This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
     new 538e296  [Relay][Refactor][std::string --> String] Relay updated with String (#5578)
538e296 is described below

commit 538e2963428707012a0349f65aad3cd701720073
Author: ANSHUMAN TRIPATHY <anshuma...@huawei.com>
AuthorDate: Tue May 19 03:43:11 2020 +0530

    [Relay][Refactor][std::string --> String] Relay updated with String (#5578)
---
 include/tvm/relay/base.h                          |  4 +--
 include/tvm/relay/expr.h                          |  6 ++--
 include/tvm/relay/op_strategy.h                   |  4 +--
 include/tvm/relay/transform.h                     |  6 ++--
 python/tvm/ir/json_compact.py                     |  1 +
 python/tvm/relay/expr.py                          |  2 +-
 src/relay/backend/compile_engine.cc               |  2 +-
 src/relay/ir/base.cc                              |  2 +-
 src/relay/ir/expr.cc                              |  2 +-
 src/relay/ir/op_strategy.cc                       |  4 +--
 src/relay/ir/transform.cc                         |  2 +-
 src/relay/op/algorithm/topk.cc                    |  2 +-
 src/relay/op/annotation/annotation.cc             |  4 +--
 src/relay/op/debug.cc                             |  2 +-
 src/relay/op/image/dilation2d.cc                  |  2 +-
 src/relay/op/image/resize.cc                      |  6 ++--
 src/relay/op/nn/bitserial.cc                      |  4 +--
 src/relay/op/nn/convolution.cc                    | 43 +++++++++++------------
 src/relay/op/nn/nn.cc                             |  4 +--
 src/relay/op/nn/pad.cc                            |  4 +--
 src/relay/op/nn/pooling.cc                        | 35 +++++++++---------
 src/relay/op/nn/upsampling.cc                     |  8 ++---
 src/relay/op/tensor/transform.cc                  |  4 +--
 src/relay/op/vision/rcnn_op.cc                    |  4 +--
 src/relay/qnn/op/convolution.cc                   |  4 +--
 src/relay/qnn/op/requantize.cc                    |  2 +-
 src/relay/quantize/quantize.cc                    |  2 +-
 src/relay/transforms/combine_parallel_op_batch.cc |  2 +-
 src/relay/transforms/forward_rewrite.cc           |  2 +-
 src/relay/transforms/legalize.cc                  |  2 +-
 src/relay/transforms/pattern_util.h               |  2 +-
 src/relay/transforms/to_a_normal_form.cc          |  2 +-
 tests/python/relay/test_json_compact.py           | 39 ++++++++++++++++++++
 33 files changed, 125 insertions(+), 89 deletions(-)
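
The change is largely mechanical: tvm::runtime::String converts implicitly to
and from std::string, so most call sites compile unchanged once the signatures
are updated. A minimal sketch of that interop (an illustration, not part of
the patch; it assumes the TVM headers as of this commit, where String lives in
tvm/runtime/container.h):

    #include <tvm/runtime/container.h>
    #include <cstdio>
    #include <string>

    using tvm::runtime::String;

    int main() {
      std::string std_name = "conv2d";
      String name = std_name;    // implicit std::string -> String
      std::string back = name;   // implicit String -> std::string
      // String is a reference-counted TVM object, so it can be stored in IR
      // nodes (e.g. IdNode::name_hint below) and passed across the FFI.
      std::printf("%s %zu\n", name.c_str(), name.size());
      return back == std_name ? 0 : 1;
    }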

diff --git a/include/tvm/relay/base.h b/include/tvm/relay/base.h
index c78ab75..eeef7cd 100644
--- a/include/tvm/relay/base.h
+++ b/include/tvm/relay/base.h
@@ -93,7 +93,7 @@ class IdNode : public Object {
    *  this only acts as a hint to the user,
    *  and is not used for equality.
    */
-  std::string name_hint;
+  String name_hint;
 
   void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name_hint", &name_hint); }
 
@@ -107,7 +107,7 @@ class Id : public ObjectRef {
    * \brief The constructor
    * \param name_hint The name of the variable.
    */
-  TVM_DLL explicit Id(std::string name_hint);
+  TVM_DLL explicit Id(String name_hint);
 
   TVM_DEFINE_OBJECT_REF_METHODS(Id, ObjectRef, IdNode);
 };
diff --git a/include/tvm/relay/expr.h b/include/tvm/relay/expr.h
index 69a60a7..779bcc3 100644
--- a/include/tvm/relay/expr.h
+++ b/include/tvm/relay/expr.h
@@ -170,7 +170,7 @@ class VarNode : public ExprNode {
   Type type_annotation;
 
   /*! \return The name hint of the variable */
-  const std::string& name_hint() const { return vid->name_hint; }
+  const String& name_hint() const { return vid->name_hint; }
 
   void VisitAttrs(tvm::AttrVisitor* v) {
     v->Visit("vid", &vid);
@@ -188,7 +188,7 @@ class VarNode : public ExprNode {
     hash_reduce.FreeVarHashImpl(this);
   }
 
-  TVM_DLL static Var make(std::string name_hint, Type type_annotation);
+  TVM_DLL static Var make(String name_hint, Type type_annotation);
 
   TVM_DLL static Var make(Id vid, Type type_annotation);
 
@@ -203,7 +203,7 @@ class Var : public Expr {
    * \param name_hint The name hint of a variable.
    * \param type_annotation The type annotation of a variable.
    */
-  TVM_DLL Var(std::string name_hint, Type type_annotation) : Var(Id(name_hint), type_annotation) {}
+  TVM_DLL Var(String name_hint, Type type_annotation) : Var(Id(name_hint), type_annotation) {}
 
   /*!
    * \brief The constructor
diff --git a/include/tvm/relay/op_strategy.h b/include/tvm/relay/op_strategy.h
index 3f5876d..c70da62 100644
--- a/include/tvm/relay/op_strategy.h
+++ b/include/tvm/relay/op_strategy.h
@@ -118,7 +118,7 @@ class OpSpecialization : public ObjectRef {
    * \param name Name of the implementation
    * \param plevel Priority level of the implementation
    */
-  TVM_DLL void AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, std::string name,
+  TVM_DLL void AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, String name,
                                  int plevel);
 
   TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(OpSpecialization, ObjectRef, OpSpecializationNode);
@@ -150,7 +150,7 @@ class OpStrategy : public ObjectRef {
    * \param name Name of the implementation
    * \param plevel Priority level of the implementation
    */
-  TVM_DLL void AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, std::string name,
+  TVM_DLL void AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, String name,
                                  int plevel);
 
   TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(OpStrategy, ObjectRef, OpStrategyNode);
diff --git a/include/tvm/relay/transform.h b/include/tvm/relay/transform.h
index 9a8ca84..8f55fdf 100644
--- a/include/tvm/relay/transform.h
+++ b/include/tvm/relay/transform.h
@@ -58,7 +58,7 @@ using Sequential = tvm::transform::Sequential;
  */
 TVM_DLL Pass CreateFunctionPass(
     const runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)>& pass_func,
-    int opt_level, const std::string& name, const tvm::Array<runtime::String>& required);
+    int opt_level, const String& name, const tvm::Array<runtime::String>& required);
 
 /*! \brief Remove expressions which does not effect the program result.
  *
@@ -298,7 +298,7 @@ TVM_DLL Pass ConvertLayout(const Map<std::string, Array<String>>& desired_layout
  *
  * \return The pass.
  */
-TVM_DLL Pass Legalize(const std::string& legalize_map_attr_name = "FTVMLegalize");
+TVM_DLL Pass Legalize(const String& legalize_map_attr_name = "FTVMLegalize");
 
 /*!
  * \brief Canonicalize cast expressions to make operator fusion more efficient.
@@ -387,7 +387,7 @@ TVM_DLL Function InferType(const Function& f, const IRModule& mod, const GlobalV
  *                           an Expr consumed by multiple callers.
  * \return The rewritten expression.
  */
-TVM_DLL Expr ForwardRewrite(const Expr& expr, const std::string& rewrite_map_attr_name,
+TVM_DLL Expr ForwardRewrite(const Expr& expr, const String& rewrite_map_attr_name,
                             std::function<ObjectRef(const Call&)> fcontext = nullptr,
                             std::function<Expr(const Expr&)> fmulti_ref_trigger = nullptr);
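
For context, a hedged sketch of creating a pass against the String-based
CreateFunctionPass signature above; the pass name and the empty required-pass
list are illustrative, not part of this patch:

    #include <tvm/relay/transform.h>

    namespace tvm {
    namespace relay {
    namespace transform {

    // Identity pass: returns every function unchanged.
    Pass MyNoOpPass() {
      runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)> pass_func =
          [](Function f, IRModule m, PassContext pc) { return f; };
      // "MyNoOpPass" binds to const String& via the implicit
      // const char* -> String conversion.
      return CreateFunctionPass(pass_func, /*opt_level=*/0, "MyNoOpPass", {});
    }

    }  // namespace transform
    }  // namespace relay
    }  // namespace tvm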
 
diff --git a/python/tvm/ir/json_compact.py b/python/tvm/ir/json_compact.py
index a3ff499..2abfd81 100644
--- a/python/tvm/ir/json_compact.py
+++ b/python/tvm/ir/json_compact.py
@@ -111,6 +111,7 @@ def create_updater_06_to_07():
         "EnvFunc": _update_global_key,
         "relay.Op": _update_global_key,
         "relay.TypeVar": [_ftype_var, _update_from_std_str("name_hint")],
+        "relay.Id": [_update_from_std_str("name_hint")],
         "relay.GlobalTypeVar": [_ftype_var, _update_from_std_str("name_hint")],
         "relay.Type": _rename("Type"),
         "relay.TupleType": _rename("TupleType"),
diff --git a/python/tvm/relay/expr.py b/python/tvm/relay/expr.py
index 3e98e52..a428e1b 100644
--- a/python/tvm/relay/expr.py
+++ b/python/tvm/relay/expr.py
@@ -221,7 +221,7 @@ class Var(ExprWithOp):
     @property
     def name_hint(self):
         """Get name hint of the current var."""
-        name = self.vid.name_hint
+        name = str(self.vid.name_hint)
         return name
 
 
diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc
index 31293a9..421b032 100644
--- a/src/relay/backend/compile_engine.cc
+++ b/src/relay/backend/compile_engine.cc
@@ -587,7 +587,7 @@ class CompileEngineImpl : public CompileEngineNode {
         auto symbol_name = src_func->GetAttr<String>(tvm::attr::kGlobalSymbol);
         CHECK(symbol_name.defined()) << "No external symbol is set for:\n"
                                      << AsText(src_func, false);
-        auto gv = GlobalVar(std::string(symbol_name.value()));
+        auto gv = GlobalVar(symbol_name.value());
         // No need to keep compiler attribute at this point, functions have been
         // extracted for specific codegen.
         src_func = WithAttr(std::move(src_func), attr::kCompiler, NullValue<ObjectRef>());
diff --git a/src/relay/ir/base.cc b/src/relay/ir/base.cc
index 37b0ff5..5f7b874 100644
--- a/src/relay/ir/base.cc
+++ b/src/relay/ir/base.cc
@@ -33,7 +33,7 @@ using namespace tvm::runtime;
 
 TVM_REGISTER_NODE_TYPE(IdNode);
 
-Id::Id(std::string name_hint) {
+Id::Id(String name_hint) {
   ObjectPtr<IdNode> n = make_object<IdNode>();
   n->name_hint = std::move(name_hint);
   data_ = std::move(n);
diff --git a/src/relay/ir/expr.cc b/src/relay/ir/expr.cc
index 5ac5805..c2f3aef 100644
--- a/src/relay/ir/expr.cc
+++ b/src/relay/ir/expr.cc
@@ -90,7 +90,7 @@ Var::Var(Id vid, Type type_annotation) {
 
 TVM_REGISTER_NODE_TYPE(VarNode);
 
-TVM_REGISTER_GLOBAL("relay.ir.Var").set_body_typed([](std::string str, Type type_annotation) {
+TVM_REGISTER_GLOBAL("relay.ir.Var").set_body_typed([](String str, Type type_annotation) {
   return Var(str, type_annotation);
 });
 
diff --git a/src/relay/ir/op_strategy.cc b/src/relay/ir/op_strategy.cc
index 989e3a6..a946b94 100644
--- a/src/relay/ir/op_strategy.cc
+++ b/src/relay/ir/op_strategy.cc
@@ -42,7 +42,7 @@ te::Schedule OpImplementation::Schedule(const Attrs& attrs, const Array<te::Tens
 }
 
 void OpSpecialization::AddImplementation(tvm::relay::FTVMCompute fcompute,
-                                         tvm::relay::FTVMSchedule fschedule, std::string name,
+                                         tvm::relay::FTVMSchedule fschedule, String name,
                                          int plevel) {
   auto n = make_object<OpImplementationNode>();
   n->fcompute = fcompute;
@@ -52,7 +52,7 @@ void OpSpecialization::AddImplementation(tvm::relay::FTVMCompute fcompute,
   (*this)->implementations.push_back(OpImplementation(n));
 }
 
-void OpStrategy::AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, std::string name,
+void OpStrategy::AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, String name,
                                    int plevel) {
   auto curr_cond = te::SpecializedCondition::Current();
   auto self = this->operator->();
diff --git a/src/relay/ir/transform.cc b/src/relay/ir/transform.cc
index 6b99c93..6942df2 100644
--- a/src/relay/ir/transform.cc
+++ b/src/relay/ir/transform.cc
@@ -143,7 +143,7 @@ bool FunctionPassNode::SkipFunction(const Function& func) const {
 
 Pass CreateFunctionPass(
     const runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)>& pass_func,
-    int opt_level, const std::string& name, const tvm::Array<runtime::String>& required) {
+    int opt_level, const String& name, const tvm::Array<runtime::String>& required) {
   PassInfo pass_info = PassInfo(opt_level, name, required);
   return FunctionPass(pass_func, pass_info);
 }
diff --git a/src/relay/op/algorithm/topk.cc b/src/relay/op/algorithm/topk.cc
index f641f84..5ff5904 100644
--- a/src/relay/op/algorithm/topk.cc
+++ b/src/relay/op/algorithm/topk.cc
@@ -64,7 +64,7 @@ bool TopKRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   return true;
 }
 
-Expr MakeTopK(Expr data, int k, int axis, std::string ret_type, bool is_ascend, DataType dtype) {
+Expr MakeTopK(Expr data, int k, int axis, String ret_type, bool is_ascend, DataType dtype) {
   auto attrs = make_object<TopKAttrs>();
   attrs->k = k;
   attrs->axis = axis;
diff --git a/src/relay/op/annotation/annotation.cc b/src/relay/op/annotation/annotation.cc
index 2e93b58..6be9b0d 100644
--- a/src/relay/op/annotation/annotation.cc
+++ b/src/relay/op/annotation/annotation.cc
@@ -183,7 +183,7 @@ Beginning of a region that is handled by a given compiler.
                            });
 
 TVM_REGISTER_GLOBAL("relay.op.annotation._make.compiler_begin")
-    .set_body_typed([](Expr expr, std::string compiler) {
+    .set_body_typed([](Expr expr, String compiler) {
       auto attrs = make_object<CompilerAttrs>();
       attrs->compiler = compiler;
       static const Op& op = Op::Get("annotation.compiler_begin");
@@ -207,7 +207,7 @@ End of a region that is handled by a given compiler.
                            });
 
 TVM_REGISTER_GLOBAL("relay.op.annotation._make.compiler_end")
-    .set_body_typed([](Expr expr, std::string compiler) {
+    .set_body_typed([](Expr expr, String compiler) {
       auto attrs = make_object<CompilerAttrs>();
       attrs->compiler = compiler;
       static const Op& op = Op::Get("annotation.compiler_end");
diff --git a/src/relay/op/debug.cc b/src/relay/op/debug.cc
index 790f1ee..56b7d44 100644
--- a/src/relay/op/debug.cc
+++ b/src/relay/op/debug.cc
@@ -54,7 +54,7 @@ RELAY_REGISTER_OP("debug")
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
     .set_attr<FTVMCompute>("FTVMCompute", DebugCompute);
 
-Expr MakeDebug(Expr expr, std::string name) {
+Expr MakeDebug(Expr expr, String name) {
   auto dattrs = make_object<DebugAttrs>();
   if (name.size() > 0) {
     dattrs->debug_func = EnvFunc::Get(name);
diff --git a/src/relay/op/image/dilation2d.cc b/src/relay/op/image/dilation2d.cc
index 43ec856..462f11f 100644
--- a/src/relay/op/image/dilation2d.cc
+++ b/src/relay/op/image/dilation2d.cc
@@ -46,7 +46,7 @@ Array<Array<Layout> > Dilation2DInferCorrectLayout(const Attrs& attrs,
 // Positional relay function to create dilation2d operator
 // used by frontend FFI.
 Expr MakeDilation2D(Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
-                    Array<IndexExpr> dilations, std::string data_layout, std::string kernel_layout,
+                    Array<IndexExpr> dilations, String data_layout, String kernel_layout,
                     DataType out_dtype) {
   auto attrs = make_object<Dilation2DAttrs>();
   attrs->strides = std::move(strides);
diff --git a/src/relay/op/image/resize.cc b/src/relay/op/image/resize.cc
index efd815b..7ad96b4 100644
--- a/src/relay/op/image/resize.cc
+++ b/src/relay/op/image/resize.cc
@@ -64,8 +64,8 @@ bool ResizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 
 // Positional relay function to create image operator
 // used by frontend FFI.
-Expr MakeResize(Expr data, Array<IndexExpr> size, std::string layout, std::string method,
-                std::string coordinate_transformation_mode, DataType out_dtype) {
+Expr MakeResize(Expr data, Array<IndexExpr> size, String layout, String method,
+                String coordinate_transformation_mode, DataType out_dtype) {
   auto attrs = make_object<ResizeAttrs>();
   attrs->size = std::move(size);
   attrs->layout = std::move(layout);
@@ -133,7 +133,7 @@ bool CropAndResizeRel(const Array<Type>& types, int num_inputs, const Attrs& att
 }
 
 Expr MakeCropAndResize(Expr data, Expr boxes, Expr box_indices, Array<IndexExpr> crop_size,
-                       std::string layout, std::string method, double extrapolation_value,
+                       String layout, String method, double extrapolation_value,
                        DataType out_dtype) {
   auto attrs = make_object<CropAndResizeAttrs>();
   attrs->crop_size = std::move(crop_size);
diff --git a/src/relay/op/nn/bitserial.cc b/src/relay/op/nn/bitserial.cc
index 08637d9..022ca5c 100644
--- a/src/relay/op/nn/bitserial.cc
+++ b/src/relay/op/nn/bitserial.cc
@@ -86,7 +86,7 @@ bool BitPackRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 }
 
 Expr MakeBitPack(Expr data, int bits, int pack_axis, int bit_axis, DataType pack_type,
-                 std::string name) {
+                 String name) {
   auto attrs = make_object<BitPackAttrs>();
   attrs->bits = bits;
   attrs->pack_axis = pack_axis;
@@ -150,7 +150,7 @@ bool BinaryConv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attr
 // used by frontend FFI.
 Expr MakeBinaryConv2D(Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                       IndexExpr channels, Array<IndexExpr> kernel_size, int activation_bits,
-                      int weight_bits, std::string data_layout, std::string kernel_layout,
+                      int weight_bits, String data_layout, String kernel_layout,
                       DataType pack_dtype, DataType out_dtype, bool unipolar) {
   auto attrs = make_object<BinaryConv2DAttrs>();
   attrs->strides = std::move(strides);
diff --git a/src/relay/op/nn/convolution.cc b/src/relay/op/nn/convolution.cc
index 4a307c5..4770cd8 100644
--- a/src/relay/op/nn/convolution.cc
+++ b/src/relay/op/nn/convolution.cc
@@ -134,8 +134,8 @@ TVM_REGISTER_NODE_TYPE(Conv1DAttrs);
 TVM_REGISTER_GLOBAL("relay.op.nn._make.conv1d")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, DataType out_dtype) {
       return MakeConv<Conv1DAttrs>(data, weight, strides, padding, dilation, groups, channels,
                                    kernel_size, data_layout, kernel_layout, out_layout, out_dtype,
                                    "nn.conv1d");
@@ -168,8 +168,8 @@ TVM_REGISTER_NODE_TYPE(Conv2DAttrs);
 TVM_REGISTER_GLOBAL("relay.op.nn._make.conv2d")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, DataType out_dtype) {
       return MakeConv<Conv2DAttrs>(data, weight, strides, padding, dilation, groups, channels,
                                    kernel_size, data_layout, kernel_layout, out_layout, out_dtype,
                                    "nn.conv2d");
@@ -202,8 +202,8 @@ TVM_REGISTER_NODE_TYPE(Conv3DAttrs);
 TVM_REGISTER_GLOBAL("relay.op.nn._make.conv3d")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, DataType out_dtype) {
       return MakeConv<Conv3DAttrs>(data, weight, strides, padding, dilation, groups, channels,
                                    kernel_size, data_layout, kernel_layout, out_layout, out_dtype,
                                    "nn.conv3d");
@@ -237,9 +237,8 @@ TVM_REGISTER_NODE_TYPE(Conv2DTransposeAttrs);
 TVM_REGISTER_GLOBAL("relay.op.nn._make.conv2d_transpose")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout,
-                       Array<IndexExpr> output_padding, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, Array<IndexExpr> output_padding, DataType out_dtype) {
       return MakeConvTranspose<Conv2DTransposeAttrs>(
           data, weight, strides, padding, dilation, groups, channels, kernel_size, data_layout,
           kernel_layout, out_layout, output_padding, out_dtype, "nn.conv2d_transpose");
@@ -282,9 +281,8 @@ TVM_REGISTER_NODE_TYPE(Conv1DTransposeAttrs);
 TVM_REGISTER_GLOBAL("relay.op.nn._make.conv1d_transpose")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout,
-                       Array<IndexExpr> output_padding, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, Array<IndexExpr> output_padding, DataType out_dtype) {
       return MakeConvTranspose<Conv1DTransposeAttrs>(
           data, weight, strides, padding, dilation, groups, channels, kernel_size, data_layout,
           kernel_layout, out_layout, output_padding, out_dtype, "nn.conv1d_transpose");
@@ -324,8 +322,8 @@ TVM_REGISTER_NODE_TYPE(Conv2DWinogradAttrs);
 
 TVM_REGISTER_GLOBAL("relay.op.nn._make.contrib_conv2d_winograd_without_weight_transform")
     .set_body_typed([](Expr data, Expr weight, int tile_size, Array<IndexExpr> strides,
                        Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
-                       IndexExpr channels, Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       IndexExpr channels, Array<IndexExpr> kernel_size, String data_layout,
+                       String kernel_layout, String out_layout, DataType out_dtype) {
       return MakeConvWinograd<Conv2DWinogradAttrs>(
           data, weight, tile_size, strides, padding, dilation, groups, channels, kernel_size,
           data_layout, kernel_layout, out_layout, out_dtype,
@@ -382,8 +380,8 @@ TVM_REGISTER_NODE_TYPE(Conv3DWinogradAttrs);
 
 TVM_REGISTER_GLOBAL("relay.op.nn._make.contrib_conv3d_winograd_without_weight_transform")
     .set_body_typed([](Expr data, Expr weight, int tile_size, Array<IndexExpr> strides,
                        Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
-                       IndexExpr channels, Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       IndexExpr channels, Array<IndexExpr> kernel_size, String data_layout,
+                       String kernel_layout, String out_layout, DataType out_dtype) {
       return MakeConvWinograd<Conv3DWinogradAttrs>(
           data, weight, tile_size, strides, padding, dilation, groups, channels, kernel_size,
           data_layout, kernel_layout, out_layout, out_dtype,
@@ -466,8 +464,8 @@ weight transformation in advance.
 TVM_REGISTER_GLOBAL("relay.op.nn._make.contrib_conv2d_NCHWc")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, DataType out_dtype) {
       return MakeConv<Conv2DAttrs>(data, weight, strides, padding, dilation, groups, channels,
                                    kernel_size, data_layout, kernel_layout, out_layout, out_dtype,
                                    "nn.contrib_conv2d_NCHWc");
@@ -493,8 +491,8 @@ RELAY_REGISTER_OP("nn.contrib_conv2d_NCHWc")
 TVM_REGISTER_GLOBAL("relay.op.nn._make.contrib_depthwise_conv2d_NCHWc")
     .set_body_typed([](Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
                        Array<IndexExpr> dilation, int groups, IndexExpr channels,
-                       Array<IndexExpr> kernel_size, std::string data_layout,
-                       std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                       Array<IndexExpr> kernel_size, String data_layout, String kernel_layout,
+                       String out_layout, DataType out_dtype) {
       return MakeConv<Conv2DAttrs>(data, weight, strides, padding, dilation, groups, channels,
                                    kernel_size, data_layout, kernel_layout, out_layout, out_dtype,
                                    "nn.contrib_depthwise_conv2d_NCHWc");
@@ -550,9 +548,8 @@ by concating all the *g* results.
 TVM_REGISTER_GLOBAL("relay.op.nn._make.deformable_conv2d")
     .set_body_typed([](Expr data, Expr offset, Expr weight, Array<IndexExpr> strides,
                        Array<IndexExpr> padding, Array<IndexExpr> dilation, int deformable_groups,
-                       int groups, int channels, Array<IndexExpr> kernel_size,
-                       std::string data_layout, std::string kernel_layout, std::string out_layout,
-                       DataType out_dtype) {
+                       int groups, int channels, Array<IndexExpr> kernel_size, String data_layout,
+                       String kernel_layout, String out_layout, DataType out_dtype) {
       return MakeDeformableConv<DeformableConv2DAttrs>(
           data, offset, weight, strides, padding, dilation, deformable_groups, groups, channels,
           kernel_size, data_layout, kernel_layout, out_layout, out_dtype, "nn.deformable_conv2d");
diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 670878d..d65fc27 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -1031,7 +1031,7 @@ bool DepthToSpaceRel(const Array<Type>& types, int num_inputs, const Attrs& attr
 
 // Positional relay function to create DepthToSpace operator
 // used by frontend FFI
-Expr MakeDepthToSpace(Expr data, int block_size, std::string layout, std::string mode) {
+Expr MakeDepthToSpace(Expr data, int block_size, String layout, String mode) {
   auto attrs = make_object<SubPixelAttrs>();
   attrs->block_size = block_size;
   attrs->layout = std::move(layout);
@@ -1088,7 +1088,7 @@ bool SpaceToDepthRel(const Array<Type>& types, int num_inputs, const Attrs& attr
 
 // Positional relay function to create SpaceToDepth operator
 // used by frontend FFI
-Expr MakeSpaceToDepth(Expr data, int block_size, std::string layout) {
+Expr MakeSpaceToDepth(Expr data, int block_size, String layout) {
   auto attrs = make_object<SubPixelAttrs>();
   attrs->block_size = block_size;
   attrs->layout = std::move(layout);
diff --git a/src/relay/op/nn/pad.cc b/src/relay/op/nn/pad.cc
index e416a06..aba87e2 100644
--- a/src/relay/op/nn/pad.cc
+++ b/src/relay/op/nn/pad.cc
@@ -177,7 +177,7 @@ Array<te::Tensor> PadCompute(const Attrs& attrs, const Array<te::Tensor>& inputs
 }
 
 // Handler to create a call to the padding op used by front-end FFI
-Expr MakePad(Expr data, Array<Array<IndexExpr>> pad_width, double pad_value, std::string pad_mode) {
+Expr MakePad(Expr data, Array<Array<IndexExpr>> pad_width, double pad_value, String pad_mode) {
   auto attrs = make_object<PadAttrs>();
   attrs->pad_value = pad_value;
   attrs->pad_width = std::move(pad_width);
@@ -245,7 +245,7 @@ bool MirrorPadRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 }
 
 // Handler to create a call to the padding op used by front-end FFI
-Expr MakeMirrorPad(Expr data, Array<Array<IndexExpr>> pad_width, std::string mode) {
+Expr MakeMirrorPad(Expr data, Array<Array<IndexExpr>> pad_width, String mode) {
   auto attrs = make_object<MirrorPadAttrs>();
   attrs->mode = mode;
   attrs->pad_width = std::move(pad_width);
diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc
index dd64951..e54a5f3 100644
--- a/src/relay/op/nn/pooling.cc
+++ b/src/relay/op/nn/pooling.cc
@@ -58,8 +58,7 @@ Array<Array<Layout> > PoolInferCorrectLayout(const Attrs& attrs,
 
 template <typename T>
 Expr MakeMaxPool(Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                 Array<IndexExpr> padding, std::string layout, bool ceil_mode,
-                 std::string op_name) {
+                 Array<IndexExpr> padding, String layout, bool ceil_mode, String op_name) {
   auto attrs = make_object<T>();
   attrs->pool_size = std::move(pool_size);
   attrs->strides = std::move(strides);
@@ -72,8 +71,8 @@ Expr MakeMaxPool(Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides
 
 template <typename T>
 Expr MakeAvgPool(Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                 Array<IndexExpr> padding, std::string layout, bool ceil_mode,
-                 bool count_include_pad, std::string op_name) {
+                 Array<IndexExpr> padding, String layout, bool ceil_mode, bool count_include_pad,
+                 String op_name) {
   auto attrs = make_object<T>();
   attrs->pool_size = std::move(pool_size);
   attrs->strides = std::move(strides);
@@ -197,7 +196,7 @@ Array<te::Tensor> Pool2DCompute(const Attrs& attrs, const Array<te::Tensor>& inp
 
 TVM_REGISTER_GLOBAL("relay.op.nn._make.max_pool2d")
     .set_body_typed([](Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                       Array<IndexExpr> padding, std::string layout, bool ceil_mode) {
+                       Array<IndexExpr> padding, String layout, bool ceil_mode) {
       return MakeMaxPool<MaxPool2DAttrs>(data, pool_size, strides, padding, layout, ceil_mode,
                                          "nn.max_pool2d");
     });
@@ -234,7 +233,7 @@ RELAY_REGISTER_OP("nn.max_pool2d")
 // AvgPool2D
 TVM_REGISTER_GLOBAL("relay.op.nn._make.avg_pool2d")
     .set_body_typed([](Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                       Array<IndexExpr> padding, std::string layout, bool ceil_mode,
+                       Array<IndexExpr> padding, String layout, bool ceil_mode,
                        bool count_include_pad) {
       return MakeAvgPool<AvgPool2DAttrs>(data, pool_size, strides, padding, layout, ceil_mode,
                                          count_include_pad, "nn.avg_pool2d");
@@ -322,7 +321,7 @@ Array<te::Tensor> GlobalPool2DCompute(const Attrs& attrs, const Array<te::Tensor
   return Array<te::Tensor>{topi::nn::global_pool(inputs[0], mode, layout.name())};
 }
 
-Expr MakeGlobalAvgPool2D(Expr data, std::string layout) {
+Expr MakeGlobalAvgPool2D(Expr data, String layout) {
   auto attrs = make_object<GlobalPool2DAttrs>();
   attrs->layout = std::move(layout);
   static const Op& op = Op::Get("nn.global_avg_pool2d");
@@ -350,7 +349,7 @@ RELAY_REGISTER_OP("nn.global_avg_pool2d")
     .set_attr<FTVMCompute>("FTVMCompute", GlobalPool2DCompute<topi::nn::kAvgPool>);
 
 // GlobalMaxPool
-Expr MakeGlobalMaxPool2D(Expr data, std::string layout) {
+Expr MakeGlobalMaxPool2D(Expr data, String layout) {
   auto attrs = make_object<GlobalPool2DAttrs>();
   attrs->layout = std::move(layout);
   static const Op& op = Op::Get("nn.global_max_pool2d");
@@ -459,7 +458,7 @@ Array<te::Tensor> AdaptivePool2DCompute(const Attrs& attrs, const Array<te::Tens
 }
 
 // relay.nn.adaptive_avg_pool2d
-Expr MakeAdaptiveAvgPool2D(Expr data, Array<IndexExpr> output_size, std::string layout) {
+Expr MakeAdaptiveAvgPool2D(Expr data, Array<IndexExpr> output_size, String layout) {
   auto attrs = make_object<AdaptivePool2DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
@@ -494,7 +493,7 @@ RELAY_REGISTER_OP("nn.adaptive_avg_pool2d")
     .set_attr<FTVMCompute>("FTVMCompute", AdaptivePool2DCompute<topi::nn::kAvgPool>);
 
 // relay.nn.adaptive_max_pool2d
-Expr MakeAdaptiveMaxPool2D(Expr data, Array<IndexExpr> output_size, std::string layout) {
+Expr MakeAdaptiveMaxPool2D(Expr data, Array<IndexExpr> output_size, String layout) {
   auto attrs = make_object<AdaptivePool2DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
@@ -624,7 +623,7 @@ Array<te::Tensor> AdaptivePool3DCompute(const Attrs& attrs, const Array<te::Tens
 }
 
 // relay.nn.adaptive_max_pool3d
-Expr MakeAdaptiveMaxPool3D(Expr data, Array<IndexExpr> output_size, std::string layout) {
+Expr MakeAdaptiveMaxPool3D(Expr data, Array<IndexExpr> output_size, String layout) {
   auto attrs = make_object<AdaptivePool3DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
@@ -659,7 +658,7 @@ RELAY_REGISTER_OP("nn.adaptive_max_pool3d")
     .set_attr<FTVMCompute>("FTVMCompute", AdaptivePool3DCompute<topi::nn::kMaxPool>);
 
 // relay.nn.adaptive_max_pool3d
-Expr MakeAdaptiveAvgPool3D(Expr data, Array<IndexExpr> output_size, std::string layout) {
+Expr MakeAdaptiveAvgPool3D(Expr data, Array<IndexExpr> output_size, String layout) {
   auto attrs = make_object<AdaptivePool3DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
@@ -752,7 +751,7 @@ Array<te::Tensor> Pool2DGradCompute(const Attrs& attrs, const Array<te::Tensor>&
 
 // MaxPool2DGrad
 Expr MakeMaxPool2DGrad(Expr out_grad, Expr data, Array<IndexExpr> pool_size,
-                       Array<IndexExpr> strides, Array<IndexExpr> padding, std::string layout,
+                       Array<IndexExpr> strides, Array<IndexExpr> padding, String layout,
                        bool ceil_mode) {
   auto attrs = make_object<MaxPool2DAttrs>();
   attrs->pool_size = std::move(pool_size);
@@ -798,7 +797,7 @@ RELAY_REGISTER_OP("nn.max_pool2d_grad")
 
 // AvgPool2DGrad
 Expr MakeAvgPool2DGrad(Expr out_grad, Expr data, Array<IndexExpr> pool_size,
-                       Array<IndexExpr> strides, Array<IndexExpr> padding, std::string layout,
+                       Array<IndexExpr> strides, Array<IndexExpr> padding, String layout,
                        bool ceil_mode, bool count_include_pad) {
   auto attrs = make_object<AvgPool2DAttrs>();
   attrs->pool_size = std::move(pool_size);
@@ -933,7 +932,7 @@ Array<te::Tensor> Pool1DCompute(const Attrs& attrs, const Array<te::Tensor>& inp
 
 TVM_REGISTER_GLOBAL("relay.op.nn._make.max_pool1d")
     .set_body_typed([](Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                       Array<IndexExpr> padding, std::string layout, bool ceil_mode) {
+                       Array<IndexExpr> padding, String layout, bool ceil_mode) {
       return MakeMaxPool<MaxPool1DAttrs>(data, pool_size, strides, padding, layout, ceil_mode,
                                          "nn.max_pool1d");
     });
@@ -968,7 +967,7 @@ RELAY_REGISTER_OP("nn.max_pool1d")
 // AvgPool1D
 TVM_REGISTER_GLOBAL("relay.op.nn._make.avg_pool1d")
     .set_body_typed([](Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                       Array<IndexExpr> padding, std::string layout, bool ceil_mode,
+                       Array<IndexExpr> padding, String layout, bool ceil_mode,
                        bool count_include_pad) {
       return MakeAvgPool<AvgPool1DAttrs>(data, pool_size, strides, padding, layout, ceil_mode,
                                          count_include_pad, "nn.avg_pool1d");
@@ -1120,7 +1119,7 @@ Array<te::Tensor> Pool3DCompute(const Attrs& attrs, const Array<te::Tensor>& inp
 
 TVM_REGISTER_GLOBAL("relay.op.nn._make.max_pool3d")
     .set_body_typed([](Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                       Array<IndexExpr> padding, std::string layout, bool ceil_mode) {
+                       Array<IndexExpr> padding, String layout, bool ceil_mode) {
       return MakeMaxPool<MaxPool3DAttrs>(data, pool_size, strides, padding, layout, ceil_mode,
                                          "nn.max_pool3d");
     });
@@ -1158,7 +1157,7 @@ RELAY_REGISTER_OP("nn.max_pool3d")
 // AvgPool3D
 TVM_REGISTER_GLOBAL("relay.op.nn._make.avg_pool3d")
     .set_body_typed([](Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
-                       Array<IndexExpr> padding, std::string layout, bool ceil_mode,
+                       Array<IndexExpr> padding, String layout, bool ceil_mode,
                        bool count_include_pad) {
       return MakeAvgPool<AvgPool3DAttrs>(data, pool_size, strides, padding, layout, ceil_mode,
                                          count_include_pad, "nn.avg_pool3d");
diff --git a/src/relay/op/nn/upsampling.cc b/src/relay/op/nn/upsampling.cc
index 7f5e683..3228b72 100644
--- a/src/relay/op/nn/upsampling.cc
+++ b/src/relay/op/nn/upsampling.cc
@@ -91,8 +91,8 @@ bool UpSamplingRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 
 // Positional relay function to create upsampling operator
 // used by frontend FFI.
-Expr MakeUpSampling(Expr data, double scale_h, double scale_w, std::string layout,
-                    std::string method, bool align_corners) {
+Expr MakeUpSampling(Expr data, double scale_h, double scale_w, String layout, String method,
+                    bool align_corners) {
   auto attrs = make_object<UpSamplingAttrs>();
   attrs->layout = std::move(layout);
   attrs->method = std::move(method);
@@ -160,8 +160,8 @@ bool UpSampling3DRel(const Array<Type>& types, int num_inputs, const Attrs& attr
 
 // Positional relay function to create upsampling3d operator
 // used by frontend FFI.
-Expr MakeUpSampling3D(Expr data, double scale_d, double scale_h, double scale_w, std::string layout,
-                      std::string method, std::string coordinate_transformation_mode) {
+Expr MakeUpSampling3D(Expr data, double scale_d, double scale_h, double scale_w, String layout,
+                      String method, String coordinate_transformation_mode) {
   auto attrs = make_object<UpSampling3DAttrs>();
   attrs->layout = std::move(layout);
   attrs->method = std::move(method);
diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc
index 8b58946..892f3a4 100644
--- a/src/relay/op/tensor/transform.cc
+++ b/src/relay/op/tensor/transform.cc
@@ -882,7 +882,7 @@ Array<te::Tensor> TakeCompute(const Attrs& attrs, const Array<te::Tensor>& input
   }
 }
 
-Expr MakeTake(Expr data, Expr indices, Integer axis, std::string mode) {
+Expr MakeTake(Expr data, Expr indices, Integer axis, String mode) {
   auto attrs = make_object<TakeAttrs>();
   attrs->axis = std::move(axis);
   attrs->mode = std::move(mode);
@@ -2166,7 +2166,7 @@ bool LayoutTransformRel(const Array<Type>& types, int num_inputs, const Attrs& a
   return true;
 }
 
-Expr MakeLayoutTransform(Expr data, std::string src_layout, std::string dst_layout) {
+Expr MakeLayoutTransform(Expr data, String src_layout, String dst_layout) {
   auto attrs = make_object<LayoutTransformAttrs>();
   attrs->src_layout = std::move(src_layout);
   attrs->dst_layout = std::move(dst_layout);
diff --git a/src/relay/op/vision/rcnn_op.cc b/src/relay/op/vision/rcnn_op.cc
index efedb5e..f7e1ecb 100644
--- a/src/relay/op/vision/rcnn_op.cc
+++ b/src/relay/op/vision/rcnn_op.cc
@@ -52,7 +52,7 @@ bool ROIAlignRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 }
 
 Expr MakeROIAlign(Expr data, Expr rois, Array<IndexExpr> pooled_size, double spatial_scale,
-                  int sample_ratio, std::string layout) {
+                  int sample_ratio, String layout) {
   auto attrs = make_object<ROIAlignAttrs>();
   attrs->pooled_size = pooled_size;
   attrs->spatial_scale = spatial_scale;
@@ -102,7 +102,7 @@ bool ROIPoolRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 }
 
 Expr MakeROIPool(Expr data, Expr rois, Array<IndexExpr> pooled_size, double spatial_scale,
-                 std::string layout) {
+                 String layout) {
   auto attrs = make_object<ROIPoolAttrs>();
   attrs->pooled_size = pooled_size;
   attrs->spatial_scale = spatial_scale;
diff --git a/src/relay/qnn/op/convolution.cc b/src/relay/qnn/op/convolution.cc
index ae52a42..9412ab4 100644
--- a/src/relay/qnn/op/convolution.cc
+++ b/src/relay/qnn/op/convolution.cc
@@ -662,8 +662,8 @@ Expr QnnConv2DCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
 Expr MakeQnnConv2D(Expr data, Expr weight, Expr input_zero_point, Expr kernel_zero_point,
                    Expr input_scale, Expr kernel_scale, Array<IndexExpr> strides,
                    Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
-                   IndexExpr channels, Array<IndexExpr> kernel_size, std::string data_layout,
-                   std::string kernel_layout, std::string out_layout, DataType out_dtype) {
+                   IndexExpr channels, Array<IndexExpr> kernel_size, String data_layout,
+                   String kernel_layout, String out_layout, DataType out_dtype) {
   auto attrs = make_object<Conv2DAttrs>();
   attrs->strides = std::move(strides);
   attrs->padding = std::move(padding);
diff --git a/src/relay/qnn/op/requantize.cc b/src/relay/qnn/op/requantize.cc
index 79cb08d..bdeaf05 100644
--- a/src/relay/qnn/op/requantize.cc
+++ b/src/relay/qnn/op/requantize.cc
@@ -281,7 +281,7 @@ bool RequantizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 // Positional relay function to create qnn requantize operator
 // used by frontend FFI.
 Expr MakeRequantize(Expr data, Expr input_scale, Expr input_zero_point, Expr output_scale,
-                    Expr output_zero_point, int axis, std::string rounding, DataType out_dtype) {
+                    Expr output_zero_point, int axis, String rounding, DataType out_dtype) {
   auto attrs = make_object<RequantizeAttrs>();
   attrs->axis = axis;
   attrs->rounding = std::move(rounding);
diff --git a/src/relay/quantize/quantize.cc b/src/relay/quantize/quantize.cc
index d197458..1bf858b 100644
--- a/src/relay/quantize/quantize.cc
+++ b/src/relay/quantize/quantize.cc
@@ -67,7 +67,7 @@ RELAY_REGISTER_OP("relay.op.annotation.simulated_quantize")
 
 TVM_REGISTER_GLOBAL("relay._quantize.simulated_quantize")
     .set_body_typed([](Expr data, Expr dom_scale, Expr clip_min, Expr clip_max, int kind, bool sign,
-                       std::string rounding) {
+                       String rounding) {
       auto attrs = make_object<SimulatedQuantizeAttrs>();
       attrs->kind = kind;
       attrs->sign = sign;
diff --git a/src/relay/transforms/combine_parallel_op_batch.cc b/src/relay/transforms/combine_parallel_op_batch.cc
index 5cd287c..2e9ffdb 100644
--- a/src/relay/transforms/combine_parallel_op_batch.cc
+++ b/src/relay/transforms/combine_parallel_op_batch.cc
@@ -175,7 +175,7 @@ Expr CombineParallelOpBatch(const Expr& expr, const std::string& op_name,
 
 namespace transform {
 
-Pass CombineParallelOpBatch(const std::string& op_name, const std::string& batch_op_name,
+Pass CombineParallelOpBatch(const String& op_name, const String& batch_op_name,
                             uint64_t min_num_branches) {
   runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)> pass_func =
       [=](Function f, IRModule m, PassContext pc) {
diff --git a/src/relay/transforms/forward_rewrite.cc b/src/relay/transforms/forward_rewrite.cc
index d872116..f093f54 100644
--- a/src/relay/transforms/forward_rewrite.cc
+++ b/src/relay/transforms/forward_rewrite.cc
@@ -172,7 +172,7 @@ class ForwardRewriter : private MixedModeMutator {
   }
 };
 
-Expr ForwardRewrite(const Expr& expr, const std::string& rewrite_map_name,
+Expr ForwardRewrite(const Expr& expr, const String& rewrite_map_name,
                     std::function<ObjectRef(const Call&)> fcontext,
                     std::function<Expr(const Expr&)> fmulti_ref_trigger) {
   auto rewrite_map = Op::GetAttrMap<FForwardRewrite>(rewrite_map_name);
diff --git a/src/relay/transforms/legalize.cc b/src/relay/transforms/legalize.cc
index c1f037f..89f59f6 100644
--- a/src/relay/transforms/legalize.cc
+++ b/src/relay/transforms/legalize.cc
@@ -96,7 +96,7 @@ Expr Legalize(const Expr& expr, const std::string& legalize_map_attr_name) {
 
 namespace transform {
 
-Pass Legalize(const std::string& legalize_map_attr_name) {
+Pass Legalize(const String& legalize_map_attr_name) {
   runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)> pass_func =
       [=](Function f, IRModule m, PassContext pc) {
         return Downcast<Function>(relay::legalize::Legalize(f, legalize_map_attr_name));
diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h
index 0a51404..8f37e7c 100644
--- a/src/relay/transforms/pattern_util.h
+++ b/src/relay/transforms/pattern_util.h
@@ -602,7 +602,7 @@ Expr MakeSqueeze(Expr data, Array<Integer> axis);
 
 Expr MakeExpandDims(Expr data, int axis, int num_newaxis);
 
-Expr MakeLayoutTransform(Expr data, std::string src_layout, std::string dst_layout);
+Expr MakeLayoutTransform(Expr data, String src_layout, String dst_layout);
 
 Expr StopFusion(Expr data);
 
diff --git a/src/relay/transforms/to_a_normal_form.cc b/src/relay/transforms/to_a_normal_form.cc
index c0c9286..a78c863 100644
--- a/src/relay/transforms/to_a_normal_form.cc
+++ b/src/relay/transforms/to_a_normal_form.cc
@@ -142,7 +142,7 @@ class Fill : ExprFunctor<Expr(const Expr&, const Var&)> {
   Expr Atomic(const Expr& e, const Var& v) { return v.defined() ? GetScope(e)->ll->Push(v, e) : e; }
 
   Expr Compound(const Expr& orig, const Expr& now, const Var& v) {
-    Var var = v.defined() ? v : Var(std::string("x"), Type());
+    Var var = v.defined() ? v : Var(String("x"), Type());
     return GetScope(orig)->ll->Push(var, now);
   }
 
diff --git a/tests/python/relay/test_json_compact.py b/tests/python/relay/test_json_compact.py
index 16d02d2..c961f99 100644
--- a/tests/python/relay/test_json_compact.py
+++ b/tests/python/relay/test_json_compact.py
@@ -43,6 +43,44 @@ def test_type_var():
     assert isinstance(tvar, tvm.ir.GlobalTypeVar)
     assert tvar.name_hint == "in0"
 
+def test_var():
+    # var in 0.6
+    nodes = [
+        {"type_key": ""},
+        {"type_key": "relay.Var",
+         "attrs": {
+             "_checked_type_": "0",
+             "span": "0",
+             "type_annotation": "0",
+             "vid": "2"
+         }
+        },
+        {"type_key": "relay.Id",
+         "attrs": {"name_hint": "a3"}},
+        {"type_key": "relay.TensorType",
+         "attrs": {
+             "dtype": "float32",
+             "shape": "4",
+             "span": "0"
+         }
+        },
+        {"type_key": "Array",
+         "data": [5, 6]
+        },
+        {"type_key": "IntImm",
+         "attrs": {"dtype": "int32", "value": "16"}},
+        {"type_key": "IntImm",
+         "attrs": {"dtype": "int32", "value": "8"}}
+        ]
+    data = {
+        "root" : 1,
+        "nodes": nodes,
+        "attrs": {"tvm_version": "0.6.0"},
+        "b64ndarrays": [],
+    }
+    tvar = tvm.ir.load_json(json.dumps(data))
+    assert isinstance(tvar, relay.Var)
+    assert tvar.name_hint == "a3"
 
 def test_incomplete_type():
     nodes = [
@@ -151,6 +189,7 @@ def test_tir_var():
 if __name__ == "__main__":
     test_op()
     test_type_var()
+    test_var()
     test_incomplete_type()
     test_func_tuple_type()
     test_global_var()
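
The test added above feeds a hand-written 0.6-era JSON graph through
tvm.ir.load_json and relies on the json_compact upgrader to rewrite
relay.Id's name_hint from a plain string attribute into a String node. A
hedged C++ sketch of the same round trip on the current format, assuming
tvm/node/serialization.h:

    #include <tvm/node/serialization.h>
    #include <tvm/relay/expr.h>
    #include <cstdio>
    #include <string>

    int main() {
      // name_hint is serialized as a runtime::String node after this commit.
      tvm::relay::Var var("a3", tvm::Type());
      std::string json = tvm::SaveJSON(var);
      auto loaded = tvm::Downcast<tvm::relay::Var>(tvm::LoadJSON(json));
      std::printf("%s\n", loaded->name_hint().c_str());  // prints "a3"
      return 0;
    }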
