gromero commented on code in PR #12182:
URL: https://github.com/apache/tvm/pull/12182#discussion_r929331497


##########
gallery/how_to/work_with_microtvm/micro_aot.py:
##########
@@ -0,0 +1,162 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+.. _tutorial-micro-AoT:
+
+microTVM Host-Driven AoT
+===========================
+**Authors**:
+`Mehrdad Hessar <https://github.com/mehrdadh>`_,
+`Alan MacDonald <https://github.com/alanmacd>`_
+
+This tutorial is showcasing microTVM host-driven AoT compilation with
+a TFLite model. This tutorial can be executed on a X86 CPU using C runtime 
(CRT)
+or on Zephyr plarform on a microcontroller that supports Zephyr platform.
+"""
+
+import numpy as np
+import pathlib
+import json
+import os
+
+import tvm
+from tvm import relay
+from tvm.relay.backend import Executor, Runtime
+from tvm.contrib.download import download_testdata
+
+######################################################################
+# Import a TFLite model
+# ---------------------
+#
+# To begin with, download and import a TFLite model from TinyMLPerf models.
+#
+# **Note:** By default this tutorial runs on X86 CPU using CRT, if you would 
like to run on Zephyr platform
+# you need to export `TVM_MICRO_USE_HW` environment variable.
+#
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+MODEL_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite";
+MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", 
module="model")
+SAMPLE_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy";
+SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", 
module="data")
+
+tflite_model_buf = open(MODEL_PATH, "rb").read()
+try:
+    import tflite
+
+    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
+except AttributeError:
+    import tflite.Model
+
+    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
+
+input_shape = (1, 49, 10, 1)
+INPUT_NAME = "input_1"
+relay_mod, params = relay.frontend.from_tflite(
+    tflite_model, shape_dict={INPUT_NAME: input_shape}, 
dtype_dict={INPUT_NAME: "int8"}
+)
+
+######################################################################
+# Defining the target
+# -------------------
+#
+# Now we need to define the target, runtime and executor. In this tutorial, we 
focused on
+# using AOT host driven executor. We use the host micro target which is for 
running a model
+# on X86 CPU using CRT runtime or running a model with Zephyr platform on 
qemu_x86 simulator
+# board. In the case of a physical microcontoller, we get the target model for 
the physical
+# board (E.g. nucleo_f746zg) and pass it to `tvm.target.target.micro` to 
create a full
+# micro target.
+#
+RUNTIME = Runtime("crt", {"system-lib": True})
+TARGET = tvm.target.target.micro("host")
+EXECUTOR = Executor("aot")
+
+if use_physical_hw:
+    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
+    with open(boards_file) as f:
+        boards = json.load(f)
+    BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg")
+    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+
+######################################################################
+# Compile the model
+# -----------------
+#
+# Now, we compile the model for the target:
+#
+with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": 
True}):
+    module = tvm.relay.build(
+        relay_mod, target=TARGET, params=params, runtime=RUNTIME, 
executor=EXECUTOR
+    )
+
+######################################################################
+# Create a microTVM project
+# -----------------------
+#
+# Now that we have the comipled model as an IRModule, we need to create a 
project

Review Comment:
   compiled model



##########
gallery/how_to/work_with_microtvm/micro_aot.py:
##########
@@ -0,0 +1,162 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+.. _tutorial-micro-AoT:
+
+microTVM Host-Driven AoT
+===========================
+**Authors**:
+`Mehrdad Hessar <https://github.com/mehrdadh>`_,
+`Alan MacDonald <https://github.com/alanmacd>`_
+
+This tutorial is showcasing microTVM host-driven AoT compilation with
+a TFLite model. This tutorial can be executed on a X86 CPU using C runtime 
(CRT)
+or on Zephyr plarform on a microcontroller that supports Zephyr platform.
+"""
+
+import numpy as np
+import pathlib
+import json
+import os
+
+import tvm
+from tvm import relay
+from tvm.relay.backend import Executor, Runtime
+from tvm.contrib.download import download_testdata
+
+######################################################################
+# Import a TFLite model
+# ---------------------
+#
+# To begin with, download and import a TFLite model from TinyMLPerf models.
+#
+# **Note:** By default this tutorial runs on X86 CPU using CRT, if you would 
like to run on Zephyr platform
+# you need to export `TVM_MICRO_USE_HW` environment variable.
+#
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+MODEL_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite";
+MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", 
module="model")
+SAMPLE_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy";
+SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", 
module="data")
+
+tflite_model_buf = open(MODEL_PATH, "rb").read()
+try:
+    import tflite
+
+    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
+except AttributeError:
+    import tflite.Model
+
+    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
+
+input_shape = (1, 49, 10, 1)
+INPUT_NAME = "input_1"
+relay_mod, params = relay.frontend.from_tflite(
+    tflite_model, shape_dict={INPUT_NAME: input_shape}, 
dtype_dict={INPUT_NAME: "int8"}
+)
+
+######################################################################
+# Defining the target
+# -------------------
+#
+# Now we need to define the target, runtime and executor. In this tutorial, we 
focused on
+# using AOT host driven executor. We use the host micro target which is for 
running a model
+# on X86 CPU using CRT runtime or running a model with Zephyr platform on 
qemu_x86 simulator
+# board. In the case of a physical microcontoller, we get the target model for 
the physical
+# board (E.g. nucleo_f746zg) and pass it to `tvm.target.target.micro` to 
create a full
+# micro target.
+#
+RUNTIME = Runtime("crt", {"system-lib": True})
+TARGET = tvm.target.target.micro("host")
+EXECUTOR = Executor("aot")
+
+if use_physical_hw:
+    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
+    with open(boards_file) as f:
+        boards = json.load(f)
+    BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg")
+    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+
+######################################################################
+# Compile the model
+# -----------------
+#
+# Now, we compile the model for the target:
+#
+with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": 
True}):
+    module = tvm.relay.build(
+        relay_mod, target=TARGET, params=params, runtime=RUNTIME, 
executor=EXECUTOR
+    )
+
+######################################################################
+# Create a microTVM project
+# -----------------------
+#
+# Now that we have the comipled model as an IRModule, we need to create a 
project
+# with the compiled model in microTVM. To do this, we use Project API. We have 
defined
+# CRT and Zephyr microTVM template projects which are used for X86 CPU and 
Zephyr platforms
+# respectively.
+#
+template_project_path = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
+project_options = {}  # You can use options to provide platform-specific 
options through TVM.
+
+if use_physical_hw:
+    template_project_path = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
+    project_options = {"project_type": "host_driven", "zephyr_board": BOARD}
+
+temp_dir = tvm.contrib.utils.tempdir()
+generated_project_dir = temp_dir / "project"
+project = tvm.micro.generate_project(
+    template_project_path, module, generated_project_dir, project_options
+)
+
+######################################################################
+# Build, flash and execute the model
+# -----------------------

Review Comment:
   extend "---..." until the end of the line above, i.e. until the "l" in "model"?



##########
gallery/how_to/work_with_microtvm/micro_aot.py:
##########
@@ -0,0 +1,162 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+.. _tutorial-micro-AoT:
+
+microTVM Host-Driven AoT
+===========================
+**Authors**:
+`Mehrdad Hessar <https://github.com/mehrdadh>`_,
+`Alan MacDonald <https://github.com/alanmacd>`_
+
+This tutorial is showcasing microTVM host-driven AoT compilation with
+a TFLite model. This tutorial can be executed on a X86 CPU using C runtime 
(CRT)
+or on Zephyr plarform on a microcontroller that supports Zephyr platform.
+"""
+
+import numpy as np
+import pathlib
+import json
+import os
+
+import tvm
+from tvm import relay
+from tvm.relay.backend import Executor, Runtime
+from tvm.contrib.download import download_testdata
+
+######################################################################
+# Import a TFLite model
+# ---------------------
+#
+# To begin with, download and import a TFLite model from TinyMLPerf models.

Review Comment:
   It's not actually importing directly from the MLPerf Tiny benchmark, i.e. from:
   
   
https://github.com/mlcommons/tiny/tree/master/benchmark/training/keyword_spotting/trained_models
   
   Should this benchmark be mentioned just as the original source of this model?
   
   Also, I think the current name for the benchmark is MLPerf Tiny, not TinyMLPerf, 
as in the text found in the README.md here: https://github.com/mlcommons/tiny
   
   
   
   



##########
gallery/how_to/work_with_microtvm/micro_aot.py:
##########
@@ -0,0 +1,162 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+.. _tutorial-micro-AoT:
+
+microTVM Host-Driven AoT
+===========================
+**Authors**:
+`Mehrdad Hessar <https://github.com/mehrdadh>`_,
+`Alan MacDonald <https://github.com/alanmacd>`_
+
+This tutorial is showcasing microTVM host-driven AoT compilation with
+a TFLite model. This tutorial can be executed on a X86 CPU using C runtime 
(CRT)
+or on Zephyr plarform on a microcontroller that supports Zephyr platform.

Review Comment:
   platforms



##########
gallery/how_to/work_with_microtvm/micro_aot.py:
##########
@@ -0,0 +1,162 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+.. _tutorial-micro-AoT:
+
+microTVM Host-Driven AoT
+===========================
+**Authors**:
+`Mehrdad Hessar <https://github.com/mehrdadh>`_,
+`Alan MacDonald <https://github.com/alanmacd>`_
+
+This tutorial is showcasing microTVM host-driven AoT compilation with
+a TFLite model. This tutorial can be executed on a X86 CPU using C runtime 
(CRT)
+or on Zephyr plarform on a microcontroller that supports Zephyr platform.
+"""
+
+import numpy as np
+import pathlib
+import json
+import os
+
+import tvm
+from tvm import relay
+from tvm.relay.backend import Executor, Runtime
+from tvm.contrib.download import download_testdata
+
+######################################################################
+# Import a TFLite model
+# ---------------------
+#
+# To begin with, download and import a TFLite model from TinyMLPerf models.
+#
+# **Note:** By default this tutorial runs on X86 CPU using CRT, if you would 
like to run on Zephyr platform
+# you need to export `TVM_MICRO_USE_HW` environment variable.
+#
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+MODEL_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite";
+MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", 
module="model")
+SAMPLE_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy";
+SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", 
module="data")
+
+tflite_model_buf = open(MODEL_PATH, "rb").read()
+try:
+    import tflite
+
+    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
+except AttributeError:
+    import tflite.Model
+
+    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
+
+input_shape = (1, 49, 10, 1)
+INPUT_NAME = "input_1"
+relay_mod, params = relay.frontend.from_tflite(
+    tflite_model, shape_dict={INPUT_NAME: input_shape}, 
dtype_dict={INPUT_NAME: "int8"}
+)
+
+######################################################################
+# Defining the target
+# -------------------
+#
+# Now we need to define the target, runtime and executor. In this tutorial, we 
focused on
+# using AOT host driven executor. We use the host micro target which is for 
running a model
+# on X86 CPU using CRT runtime or running a model with Zephyr platform on 
qemu_x86 simulator
+# board. In the case of a physical microcontoller, we get the target model for 
the physical
+# board (E.g. nucleo_f746zg) and pass it to `tvm.target.target.micro` to 
create a full
+# micro target.
+#
+RUNTIME = Runtime("crt", {"system-lib": True})
+TARGET = tvm.target.target.micro("host")
+EXECUTOR = Executor("aot")
+
+if use_physical_hw:
+    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
+    with open(boards_file) as f:
+        boards = json.load(f)
+    BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg")
+    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+
+######################################################################
+# Compile the model
+# -----------------
+#
+# Now, we compile the model for the target:
+#
+with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": 
True}):
+    module = tvm.relay.build(
+        relay_mod, target=TARGET, params=params, runtime=RUNTIME, 
executor=EXECUTOR
+    )
+
+######################################################################
+# Create a microTVM project
+# -----------------------
+#
+# Now that we have the comipled model as an IRModule, we need to create a 
project
+# with the compiled model in microTVM. To do this, we use Project API. We have 
defined
+# CRT and Zephyr microTVM template projects which are used for X86 CPU and 
Zephyr platforms
+# respectively.
+#
+template_project_path = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
+project_options = {}  # You can use options to provide platform-specific 
options through TVM.
+
+if use_physical_hw:
+    template_project_path = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
+    project_options = {"project_type": "host_driven", "zephyr_board": BOARD}
+
+temp_dir = tvm.contrib.utils.tempdir()
+generated_project_dir = temp_dir / "project"
+project = tvm.micro.generate_project(
+    template_project_path, module, generated_project_dir, project_options
+)
+
+######################################################################
+# Build, flash and execute the model
+# -----------------------
+# Next, we build the microTVM project and flash it. Flash step is specific to
+# physical microcontrollers and it is skipped if it is using CRT runtime or 
running

Review Comment:
   Change to " ... and it is skipped if the CRT runtime is used or if a Zephyr 
emulated board is selected as the target." ?  "Zephyr simulator" sounds a bit 
odd to me.



##########
gallery/how_to/work_with_microtvm/micro_aot.py:
##########
@@ -0,0 +1,162 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+.. _tutorial-micro-AoT:
+
+microTVM Host-Driven AoT
+===========================
+**Authors**:
+`Mehrdad Hessar <https://github.com/mehrdadh>`_,
+`Alan MacDonald <https://github.com/alanmacd>`_
+
+This tutorial is showcasing microTVM host-driven AoT compilation with
+a TFLite model. This tutorial can be executed on a X86 CPU using C runtime 
(CRT)
+or on Zephyr plarform on a microcontroller that supports Zephyr platform.
+"""
+
+import numpy as np
+import pathlib
+import json
+import os
+
+import tvm
+from tvm import relay
+from tvm.relay.backend import Executor, Runtime
+from tvm.contrib.download import download_testdata
+
+######################################################################
+# Import a TFLite model
+# ---------------------
+#
+# To begin with, download and import a TFLite model from TinyMLPerf models.
+#
+# **Note:** By default this tutorial runs on X86 CPU using CRT, if you would 
like to run on Zephyr platform
+# you need to export `TVM_MICRO_USE_HW` environment variable.
+#
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+MODEL_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite";
+MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", 
module="model")
+SAMPLE_URL = 
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy";
+SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", 
module="data")
+
+tflite_model_buf = open(MODEL_PATH, "rb").read()
+try:
+    import tflite
+
+    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
+except AttributeError:
+    import tflite.Model
+
+    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
+
+input_shape = (1, 49, 10, 1)
+INPUT_NAME = "input_1"
+relay_mod, params = relay.frontend.from_tflite(
+    tflite_model, shape_dict={INPUT_NAME: input_shape}, 
dtype_dict={INPUT_NAME: "int8"}
+)
+
+######################################################################
+# Defining the target
+# -------------------
+#
+# Now we need to define the target, runtime and executor. In this tutorial, we 
focused on
+# using AOT host driven executor. We use the host micro target which is for 
running a model
+# on X86 CPU using CRT runtime or running a model with Zephyr platform on 
qemu_x86 simulator
+# board. In the case of a physical microcontoller, we get the target model for 
the physical
+# board (E.g. nucleo_f746zg) and pass it to `tvm.target.target.micro` to 
create a full
+# micro target.
+#
+RUNTIME = Runtime("crt", {"system-lib": True})
+TARGET = tvm.target.target.micro("host")
+EXECUTOR = Executor("aot")
+
+if use_physical_hw:
+    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
+    with open(boards_file) as f:
+        boards = json.load(f)
+    BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg")
+    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+
+######################################################################
+# Compile the model
+# -----------------
+#
+# Now, we compile the model for the target:
+#
+with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": 
True}):
+    module = tvm.relay.build(
+        relay_mod, target=TARGET, params=params, runtime=RUNTIME, 
executor=EXECUTOR
+    )
+
+######################################################################
+# Create a microTVM project
+# -----------------------
+#
+# Now that we have the comipled model as an IRModule, we need to create a 
project
+# with the compiled model in microTVM. To do this, we use Project API. We have 
defined

Review Comment:
   would it be better to write something like the following instead:
   
   "we need to create a project to use the compiled model with microTVM"?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to