hartmannathan commented on code in PR #14851:
URL: https://github.com/apache/nuttx/pull/14851#discussion_r1849157590


##########
tools/gdb/macros.py:
##########
@@ -0,0 +1,214 @@
+############################################################################
+# tools/gdb/macros.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# NOTE: GDB stores macro information based on the current stack frame's scope,
+# including the source file and line number. Therefore, there may be missing
+# macro definitions when you are at different stack frames.
+#
+# To resolve this issue, we need to retrieve all macro information from the ELF file
+# then parse and evaluate it by ourselves.
+#
+# There might be two ways to achieve this, one is to leverage the C preprocessor
+# to directly preprocess all the macros instereted into python constants
+# gcc -E -x c -P <file_with_macros> -I/path/to/nuttx/include
+#
+# While the other way is to leverage the dwarf info stored in the ELF file,
+# with -g3 switch, we have a `.debug_macro` section containing all the information
+# about the macros.
+#
+# Currently, we using the second method.
+
+import os
+import re
+import subprocess
+import tempfile
+
+PUNCTUATORS = [
+    "\[",
+    "\]",
+    "\(",
+    "\)",
+    "\{",
+    "\}",
+    "\?",
+    ";",
+    ",",
+    "~",
+    "\.\.\.",
+    "\.",
+    "\-\>",
+    "\-\-",
+    "\-\=",
+    "\-",
+    "\+\+",
+    "\+\=",
+    "\+",
+    "\*\=",
+    "\*",
+    "\!\=",
+    "\!",
+    "\&\&",
+    "\&\=",
+    "\&",
+    "\/\=",
+    "\/",
+    "\%\>",
+    "%:%:",
+    "%:",
+    "%=",
+    "%",
+    "\^\=",
+    "\^",
+    "\#\#",
+    "\#",
+    "\:\>",
+    "\:",
+    "\|\|",
+    "\|\=",
+    "\|",
+    "<<=",
+    "<<",
+    "<=",
+    "<:",
+    "<%",
+    "<",
+    ">>=",
+    ">>",
+    ">=",
+    ">",
+    "\=\=",
+    "\=",
+]
+
+
+def parse_macro(line, macros, pattern):
+    # grep name, value
+    # the first group matches the token, the second matches the replacement
+    m = pattern.match(line)
+    if not m:
+        return False
+
+    name, value = m.group(1), m.group(2)
+
+    if name in macros:
+        # FIXME: what should we do if we got a redefinition/duplication here?
+        # for now I think it's ok just overwrite the old value
+        pass
+
+    # emplace, for all undefined macros we evalute it to zero
+    macros[name] = value if value else "0"
+
+    return True
+
+
+def fetch_macro_info(file):
+    if not os.path.isfile(file):
+        raise FileNotFoundError("No given ELF target found")
+
+    # FIXME: we don't use subprocess here because
+    # it's broken on some GDB distribution :(, I haven't
+    # found a solution to it.
+
+    with tempfile.NamedTemporaryFile(delete=False) as f1:
+
+        # # os.system(f"readelf -wm {file} > {output}")
+        process = subprocess.Popen(
+            f"readelf -wm {file}", shell=True, stdout=f1, stderr=subprocess.STDOUT
+        )
+
+        process.communicate()
+        errcode = process.returncode
+
+        f1.close()
+
+        if errcode != 0:
+            return {}
+
+        p = re.compile(".*macro[ ]*:[ ]*([\S]+\(.*?\)|[\w]+)[ ]*(.*)")
+        macros = {}
+
+        with open(f1.name, "rb") as f2:
+            for line in f2.readlines():
+                line = line.decode("utf-8")
+                if not line.startswith(" DW_MACRO_define") and not line.startswith(
+                    " DW_MACRO_undef"
+                ):
+                    continue
+
+                if not parse_macro(line, macros, p):
+                    print(f"Failed to parse {line}")
+
+    return macros
+
+
+def split_tokens(expr):
+    p = "(" + "|".join(PUNCTUATORS) + ")"
+    res = list(
+        filter(lambda e: e != "", map(lambda e: e.rstrip().lstrip(), re.split(p, expr)))
+    )
+    return res
+
+
+def do_expand(expr, macro_map):
+    if expr in PUNCTUATORS:
+        return expr
+
+    tokens = split_tokens(expr)
+
+    res = []
+
+    for t in tokens:
+        if t not in macro_map:
+            res.append(t)
+            continue
+        res += do_expand(macro_map[t], macro_map)
+
+    return res
+
+
+# NOTE: Implement a fully functional parser which can
+# preprocessing all the C marcos according to ISO 9899 standard
+# may be an overkill, what we really care about are those
+# macros that can be evaluted to an constant value.
+#
+# #define A (B + C + D)
+# #define B 1
+# #define C 2
+# #define D 3
+# invoking try_expand('A', macro_map) will give you "(1 + 2 + 3)"
+#
+# However,
+# #define SUM(B,C,D) (B + C + D)
+# invoking try_expand('SUM(1,2,3)', macro_map) will give you "SUM(1,2,3)"
+#
+# We have not implemented this feature as we have not found a practical
+# use case for it in our GDB plugin.
+#
+# However, you can switch to the correct stack frame that has this macro defined
+# and let GDB expand and evaluate it for you if you really want to evalue some very

Review Comment:
   ```suggestion
    # and let GDB expand and evaluate it for you if you really want to evaluate some very
   ```
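
As a side note on the hunk above: the regex in `fetch_macro_info` pulls the macro name and its replacement text out of each `readelf -wm` line. A minimal, self-contained sketch of that step (the sample input line is illustrative; exact `readelf` spacing varies between binutils versions):

```python
import re

# Same pattern as in macros.py: group 1 is the macro name (object-like or
# function-like), group 2 is the replacement text.
pattern = re.compile(r".*macro[ ]*:[ ]*([\S]+\(.*?\)|[\w]+)[ ]*(.*)")

# Illustrative line in the shape emitted by `readelf -wm` (an assumption,
# not captured output).
line = " DW_MACRO_define - lineno : 0 macro : CONFIG_MM_REGIONS 1"

m = pattern.match(line)
if m:
    name, value = m.group(1), m.group(2)
    print(name, "->", value or "0")  # CONFIG_MM_REGIONS -> 1
```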



##########
tools/gdb/memdump.py:
##########
@@ -142,127 +368,170 @@ def mempool_dump(self, mpool, pid, seqmin, seqmax, address):
                         and (buf["seqno"] >= seqmin and buf["seqno"] < seqmax)
                         and buf["magic"] == MEMPOOL_MAGIC_ALLOC
                     ):
-                        charnode = gdb.Value(buf).cast(
-                            gdb.lookup_type("char").pointer()
-                        )
-                        gdb.write(
-                            "%6d%12u%12u%#*x"
-                            % (
-                                buf["pid"],
-                                mm_nodesize(pool["blocksize"]),
-                                buf["seqno"],
+                        charnode = int(buf)
+                        if detail:
+                            mempool_dumpbuf(
+                                buf,
+                                pool["blocksize"],
+                                1,
                                 self.align,
-                                (int)(charnode - pool["blocksize"]),
+                                simple,
+                                detail,
+                                self.check_alive(buf["pid"]),
+                            )
+                        else:
+                            self.backtrace_dict = record_backtrace(
+                                buf, pool["blocksize"], self.backtrace_dict
                             )
-                        )
-                        if buf.type.has_key("backtrace"):
-                            max = buf["backtrace"].type.range()[1]
-                            for x in range(0, max):
-                                gdb.write(" ")
-                                gdb.write(
-                                    buf["backtrace"][x].format_string(
-                                        raw=False, symbols=True, address=False
-                                    )
-                                )
-
                         if address and (
-                            address < int(charnode)
-                            and address >= (int)(charnode - pool["blocksize"])
+                            address < charnode
+                            and address >= charnode - pool["blocksize"]
                         ):
+                            mempool_dumpbuf(
+                                buf,
+                                pool["blocksize"],
+                                1,
+                                self.align,
+                                simple,
+                                detail,
+                                self.check_alive(buf["pid"]),
+                            )
                             gdb.write(
                                 "\nThe address 0x%x found belongs to"
                                 "the mempool node with base address 0x%x\n"
                                 % (address, charnode)
                             )
+                            print_node = "p *(struct mempool_backtrace_s *)0x%x" % (
+                                charnode
+                            )
+                            gdb.write(print_node + "\n")
+                            gdb.execute(print_node)
                             return True
-
-                        gdb.write("\n")
                         self.aordblks += 1
                         self.uordblks += pool["blocksize"]
         return False
 
-    def memdump(self, pid, seqmin, seqmax, address):
+    def memnode_dump(self, node):
+        self.aordblks += 1
+        self.uordblks += node.size
+        node.dump(
+            detail=self.detail,
+            simple=self.simple,
+            align=self.align,
+            check_alive=self.check_alive,
+            backtrace_dict=self.backtrace_dict,
+        )
+
+    def memdump(self, pid, seqmin, seqmax, address, simple, detail, biggest_top=30):
         """Dump the heap memory"""
-        if pid >= PID_MM_ALLOC:
-            gdb.write("Dump all used memory node info:\n")
-            gdb.write(
-                "%6s%12s%12s%*s %s\n"
-                % ("PID", "Size", "Sequence", self.align, "Address", "Callstack")
-            )
-        else:
-            gdb.write("Dump all free memory node info:\n")
-            gdb.write("%12s%*s\n" % ("Size", self.align, "Address"))
+
+        self.simple = simple
+        self.detail = detail
+
+        alloc_node = []
+        free_node = []
 
         heap = gdb.parse_and_eval("g_mmheap")
-        if heap.type.has_key("mm_mpool"):
-            if self.mempool_dump(heap["mm_mpool"], pid, seqmin, seqmax, address):
-                return
+        prev_node = None
 
-        for node in mm_foreach(heap):
-            if node["size"] & MM_ALLOC_BIT != 0:
-                if (
-                    pid == node["pid"]
-                    or (pid == PID_MM_ALLOC and node["pid"] != PID_MM_MEMPOOL)
-                ) and (node["seqno"] >= seqmin and node["seqno"] < seqmax):
-                    charnode = gdb.Value(node).cast(gdb.lookup_type("char").pointer())
+        for gdb_node in mm_foreach(heap):
+            node = HeapNode(gdb_node)
+
+            if prev_node:
+                prev_node.nextfree = not node.alloc
+
+            prev_node = node
+
+            if not node.inside_sequence(seqmin, seqmax):
+                continue
+
+            if address:
+                if node.contains_address(address):
                     gdb.write(
-                        "%6d%12u%12u%#*x"
-                        % (
-                            node["pid"],
-                            mm_nodesize(node["size"]),
-                            node["seqno"],
-                            self.align,
-                            (int)(
-                                charnode
-                                + gdb.lookup_type("struct mm_allocnode_s").sizeof
-                            ),
-                        )
+                        "\nThe address 0x%x found belongs to"
+                        "the memory node with base address 0x%x\n"
+                        % (address, node.base)
                     )
+                    print_node = "p *(struct mm_allocnode_s *)0x%x" % (node.base)
+                    gdb.write(print_node + "\n")
+                    gdb.execute(print_node)
+                    return
 
-                    if node.type.has_key("backtrace"):
-                        max = node["backtrace"].type.range()[1]
-                        for x in range(0, max):
-                            gdb.write(" ")
-                            gdb.write(
-                                node["backtrace"][x].format_string(
-                                    raw=False, symbols=True, address=False
-                                )
-                            )
+            if node.alloc:
+                alloc_node.append(node)
+            else:
+                free_node.append(node)
 
-                    gdb.write("\n")
+        if heap.type.has_key("mm_mpool"):
+            if self.mempool_dump(
+                heap["mm_mpool"], pid, seqmin, seqmax, address, simple, detail
+            ):
+                return
 
-                    if address and (
-                        address < int(charnode + node["size"])
-                        and address
-                        >= (int)(
-                            charnode + gdb.lookup_type("struct mm_allocnode_s").sizeof
-                        )
-                    ):
-                        gdb.write(
-                            "\nThe address 0x%x found belongs to"
-                            "the memory node with base address 0x%x\n"
-                            % (address, charnode)
-                        )
-                        return
+        title_dict = {
+            PID_MM_ALLOC: "Dump all used memory node info, use '\x1b[33;1m*\x1b[m' mark pid is not exist:\n",

Review Comment:
   ```suggestion
            PID_MM_ALLOC: "Dump all used memory node info, use '\x1b[33;1m*\x1b[m' mark pid does not exist:\n",
   ```
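
For context on the address lookup this hunk reworks: both the heap and mempool paths reduce to a containment test over `[base, base + size)`. A standalone sketch of that test (names here are illustrative, not the PR's API):

```python
def owning_node(nodes, address):
    """Return the (base, size) pair whose range [base, base + size)
    contains address, or None. nodes yields (base, size) tuples."""
    for base, size in nodes:
        if base <= address < base + size:
            return (base, size)
    return None

# A node at 0x1000 spanning 64 bytes owns address 0x1020.
assert owning_node([(0x1000, 64), (0x2000, 32)], 0x1020) == (0x1000, 64)
```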



##########
tools/gdb/utils.py:
##########
@@ -155,23 +321,37 @@ def read_ulong(buffer, offset):
 target_arch = None
 
 
-def is_target_arch(arch):
-    """Return True if the target architecture is ARCH"""
+def is_target_arch(arch, exact=False):
+    """
+    For non exactly match, this function will

Review Comment:
   ```suggestion
       For non exact match, this function will
   ```
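
For readers without the full diff: one plausible shape of the exact/substring matching this docstring describes, assuming the implementation compares against GDB's reported architecture name (the real code may differ):

```python
import gdb

def is_target_arch(arch, exact=False):
    # GDB reports names such as "armv7e-m" or "i386:x86-64"; a non-exact
    # match only requires `arch` to occur as a substring of that name.
    name = gdb.selected_frame().architecture().name()
    return name == arch if exact else arch in name
```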



##########
tools/gdb/utils.py:
##########
@@ -1,8 +1,6 @@
 ############################################################################
 # tools/gdb/utils.py
 #

Review Comment:
   Removed SPDX License Identifier by mistake?



##########
tools/gdb/macros.py:
##########
@@ -0,0 +1,214 @@
+############################################################################
+# tools/gdb/macros.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# NOTE: GDB stores macro information based on the current stack frame's scope,
+# including the source file and line number. Therefore, there may be missing
+# macro definitions when you are at different stack frames.
+#
+# To resolve this issue, we need to retrieve all macro information from the ELF file
+# then parse and evaluate it by ourselves.
+#
+# There might be two ways to achieve this, one is to leverage the C preprocessor
+# to directly preprocess all the macros instereted into python constants
+# gcc -E -x c -P <file_with_macros> -I/path/to/nuttx/include
+#
+# While the other way is to leverage the dwarf info stored in the ELF file,
+# with -g3 switch, we have a `.debug_macro` section containing all the information
+# about the macros.
+#
+# Currently, we using the second method.

Review Comment:
   ```suggestion
   # Currently, we are using the second method.
   ```



##########
tools/gdb/stack.py:
##########
@@ -0,0 +1,206 @@
+############################################################################
+# tools/gdb/stack.py
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+import traceback
+
+import gdb
+import utils
+
+STACK_COLORATION_PATTERN = utils.get_symbol_value("STACK_COLOR")
+
+
+class Stack(object):
+    def __init__(self, name, entry, base, alloc, size, cursp, align):
+        # We don't care about the stack growth here, base always point to the lower address!
+        self._thread_name = name
+        self._thread_entry = entry
+        self._stack_base = base
+        self._stack_alloc = alloc
+        self._stack_top = base + size
+        self._cur_sp = cursp
+        self._stack_size = size
+        self._align = align
+        self._pattern = STACK_COLORATION_PATTERN
+
+        self._sanity_check()
+
+    def _sanity_check(self):
+        # do some basic sanity checking to make sure we have a sane stack object
+        if self._stack_base < self._stack_alloc or not self._stack_size:
+            raise gdb.GdbError("Inconsistant stack size...Maybe memory corruption?")
+
+        # TODO: check if stack ptr is located at a sane address range!
+
+    def cur_usage(self):
+        usage = self._stack_top - self._cur_sp
+
+        if self.is_stackof():
+            gdb.write("An overflow detected, dumping the stack:\n")
+
+            ptr_4bytes = gdb.Value(self._stack_base).cast(
+                utils.lookup_type("unsigned int").pointer()
+            )
+
+            for i in range(0, self._stack_size // 4):
+                if i % 8 == 0:
+                    gdb.write(f"{hex(self._stack_base + 4 * i)}: ")
+
+                gdb.write(f"{hex(ptr_4bytes[i]):10} ")
+
+                if i % 8 == 7:
+                    gdb.write("\n")
+
+            gdb.write("\n")
+            raise gdb.GdbError(
+                "pls check your stack size! @ {0} sp:{1:x} base:{2:x}".format(
+                    self._thread_name, self._cur_sp, self._stack_base
+                )
+            )
+
+        return usage
+
+    def check_max_usage(self):
+        ptr_4bytes = gdb.Value(self._stack_base).cast(
+            utils.lookup_type("unsigned int").pointer()
+        )
+
+        spare = 0
+
+        for i in range(0, self._stack_size // 4):
+            if int(ptr_4bytes[i]) != self._pattern:
+                spare = i * 4
+                break
+        return self._stack_size - spare
+
+    def max_usage(self):
+        if not utils.get_symbol_value("CONFIG_STACK_COLORATION"):
+            return 0
+
+        return self.check_max_usage()
+
+    def avalaible(self):
+        cur_usage = self.cur_usage()
+        return self._stack_size - cur_usage
+
+    def maxdepth_backtrace(self):
+        raise gdb.GdbError("Not implemented yet", traceback.print_stack())
+
+    def cur_sp(self):
+        return self._cur_sp
+
+    def is_stackof(self):
+        # we should notify the user if the stack overflow is about to happen as well!
+        return self._cur_sp <= self._stack_base
+
+    def has_stackof(self):
+        max_usage = self.max_usage()
+
+        return max_usage >= self._stack_size
+
+
+# Always refetch the stack infos, never cached as we may have threads created/destroyed
+# dynamically!
+def fetch_stacks():
+    stacks = dict()
+
+    for tcb in utils.get_tcbs():
+        if (
+            tcb["task_state"] == gdb.parse_and_eval("TSTATE_TASK_RUNNING")
+            and not utils.in_interrupt_context()
+        ):
+            sp = utils.get_sp()
+        else:
+            sp = utils.get_sp(tcb=tcb)
+
+        try:
+            stacks[int(tcb["pid"])] = Stack(
+                tcb["name"].string(),
+                hex(tcb["entry"]["pthread"]),  # should use main?
+                int(tcb["stack_base_ptr"]),
+                int(tcb["stack_alloc_ptr"]),
+                int(tcb["adj_stack_size"]),
+                sp,
+                4,
+            )
+
+        except gdb.GdbError as e:
+            gdb.write(f"Failed to construction stack object for tcb: {e}")

Review Comment:
   ```suggestion
               gdb.write(f"Failed to construct stack object for tcb: {e}")
   ```
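
A usage sketch for the Stack class and fetch_stacks() above, run inside a GDB session with the NuttX ELF loaded (the report format is illustrative; `_stack_size` is read directly since the class exposes no public size accessor):

```python
import gdb

# fetch_stacks() maps pid -> Stack; report current and peak usage per thread.
for pid, stack in fetch_stacks().items():
    try:
        gdb.write(
            "pid %d: %d/%d bytes used, peak %d\n"
            % (pid, stack.cur_usage(), stack._stack_size, stack.max_usage())
        )
    except gdb.GdbError as e:
        # cur_usage() raises once an overflow is detected.
        gdb.write(f"pid {pid}: {e}\n")
```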



##########
tools/gdb/memdump.py:
##########
@@ -278,35 +547,463 @@ def parse_arguments(self, argv):
         parser.add_argument("-x", "--max", type=str, help="Maximum value")
         parser.add_argument("--used", action="store_true", help="Used flag")
         parser.add_argument("--free", action="store_true", help="Free flag")
-        args = parser.parse_args(args=(None if len(argv) == 1 else argv))
+        parser.add_argument("--biggest", action="store_true", help="biggest allocated")
+        parser.add_argument("--top", type=str, help="biggest top n, default 30")
+        parser.add_argument(
+            "--orphan", action="store_true", help="orphan allocated(neighbor of free)"
+        )
+        parser.add_argument(
+            "-d",
+            "--detail",
+            action="store_true",
+            help="Output details of each node",
+            default=False,
+        )
+        parser.add_argument(
+            "-s",
+            "--simple",
+            action="store_true",
+            help="Simplified Output",
+            default=False,
+        )
+
+        if argv[0] == "":
+            argv = None
+        try:
+            args = parser.parse_args(argv)
+        except SystemExit:
+            return None
+
         return {
             "pid": int(args.pid, 0) if args.pid else None,
             "seqmin": int(args.min, 0) if args.min else 0,
             "seqmax": int(args.max, 0) if args.max else 0xFFFFFFFF,
             "used": args.used,
             "free": args.free,
             "addr": int(args.addr, 0) if args.addr else None,
+            "simple": args.simple,
+            "detail": args.detail,
+            "biggest": args.biggest,
+            "orphan": args.orphan,
+            "top": int(args.top) if args.top else 30,
         }
 
     def invoke(self, args, from_tty):
-        if gdb.lookup_type("size_t").sizeof == 4:
+        if sizeof_size_t == 4:
             self.align = 11
         else:
             self.align = 19
 
         arg = self.parse_arguments(args.split(" "))
 
+        if arg is None:
+            return
+
         pid = PID_MM_ALLOC
         if arg["used"]:
             pid = PID_MM_ALLOC
         elif arg["free"]:
-            pid = PID_MM_LEAK
+            pid = PID_MM_FREE
+        elif arg["biggest"]:
+            pid = PID_MM_BIGGEST
+        elif arg["orphan"]:
+            pid = PID_MM_ORPHAN
         elif arg["pid"]:
             pid = arg["pid"]
+        if CONFIG_MM_BACKTRACE <= 0:
+            arg["detail"] = True
 
         self.aordblks = 0
         self.uordblks = 0
-        self.memdump(pid, arg["seqmin"], arg["seqmax"], arg["addr"])
+        self.backtrace_dict = {}
+        self.npidhash = gdb.parse_and_eval("g_npidhash")
+        self.pidhash = gdb.parse_and_eval("g_pidhash")
+        self.memdump(
+            pid,
+            arg["seqmin"],
+            arg["seqmax"],
+            arg["addr"],
+            arg["simple"],
+            arg["detail"],
+            arg["top"],
+        )
+
+
+class Memleak(gdb.Command):
+    """Memleak check"""
+
+    def __init__(self):
+        super(Memleak, self).__init__("memleak", gdb.COMMAND_USER)
+
+    def check_alive(self, pid):
+        return self.pidhash[pid & self.npidhash - 1] != 0
+
+    def next_ptr(self):
+        inf = gdb.selected_inferior()
+        heap = gdb.parse_and_eval("g_mmheap")
+        longsize = get_long_type().sizeof
+        region = get_symbol_value("CONFIG_MM_REGIONS")
+        regions = []
+
+        for i in range(0, region):
+            start = int(heap["mm_heapstart"][i])
+            end = int(heap["mm_heapend"][i])
+            regions.append({"start": start, "end": end})
+
+        # Search global variables
+        for objfile in gdb.objfiles():
+            gdb.write(f"Searching global symbol in: {objfile.filename}\n")
+            elf = self.elf.load_from_path(objfile.filename)
+            symtab = elf.get_section_by_name(".symtab")
+            for symbol in symtab.iter_symbols():
+                if symbol["st_info"]["type"] != "STT_OBJECT":
+                    continue
+
+                if symbol["st_size"] < longsize:
+                    continue
+
+                global_size = symbol["st_size"] // longsize * longsize
+                global_mem = inf.read_memory(symbol["st_value"], global_size)
+                while global_size:
+                    global_size = global_size - longsize
+                    ptr = read_ulong(global_mem, global_size)
+                    for region in regions:
+                        if ptr >= region["start"] and ptr < region["end"]:
+                            yield ptr
+                            break
+
+        gdb.write("Searching in grey memory\n")
+        for node in self.grey_list:
+            addr = node["addr"]
+            mem = inf.read_memory(addr, node["size"])
+            i = 0
+            while i < node["size"]:
+                ptr = read_ulong(mem, i)
+                for region in regions:
+                    if ptr >= region["start"] and ptr < region["end"]:
+                        yield ptr
+                        break
+                i = i + longsize
+
+    def collect_white_dict(self):
+        white_dict = {}
+        allocnode_size = mm_allocnode_type.sizeof
+
+        # collect all user malloc ptr
+
+        heap = gdb.parse_and_eval("g_mmheap")
+        for node in mm_foreach(heap):
+            if node["size"] & MM_ALLOC_BIT != 0 and node["pid"] != PID_MM_MEMPOOL:
+                addr = int(node) + allocnode_size
+
+                node_dict = {}
+                node_dict["node"] = node
+                node_dict["size"] = mm_nodesize(node["size"]) - allocnode_size
+                node_dict["addr"] = addr
+                white_dict[int(addr)] = node_dict
+
+        if heap.type.has_key("mm_mpool"):
+            for pool in mempool_multiple_foreach(heap["mm_mpool"]):
+                for buf in mempool_foreach(pool):
+                    if buf["magic"] == MEMPOOL_MAGIC_ALLOC:
+                        addr = int(buf) - pool["blocksize"]
+
+                        buf_dict = {}
+                        buf_dict["node"] = buf
+                        buf_dict["size"] = pool["blocksize"]
+                        buf_dict["addr"] = addr
+                        white_dict[int(addr)] = buf_dict
+
+        return white_dict
+
+    def parse_arguments(self, argv):
+        parser = argparse.ArgumentParser(description="memleak command")
+        parser.add_argument(
+            "-s",
+            "--simple",
+            action="store_true",
+            help="Simplified Output",
+            default=False,
+        )
+        parser.add_argument(
+            "-d",
+            "--detail",
+            action="store_true",
+            help="Output details of each node",
+            default=False,
+        )
+
+        if argv[0] == "":
+            argv = None
+        try:
+            args = parser.parse_args(argv)
+        except SystemExit:
+            return None
+
+        return {"simple": args.simple, "detail": args.detail}
+
+    def invoke(self, args, from_tty):
+        self.elf = utils.import_check(
+            "elftools.elf.elffile", "ELFFile", "Plase pip install pyelftools\n"
+        )
+        if not self.elf:
+            return
+
+        if sizeof_size_t == 4:
+            align = 11
+        else:
+            align = 19
+
+        arg = self.parse_arguments(args.split(" "))
+
+        if arg is None:
+            return
+
+        if CONFIG_MM_BACKTRACE < 0:
+            gdb.write("Need to set CONFIG_MM_BACKTRACE to 8 or 16 better.\n")
+            return
+        elif CONFIG_MM_BACKTRACE == 0:
+            gdb.write("CONFIG_MM_BACKTRACE is 0, no backtrace available\n")
+
+        start = last = time.time()
+        white_dict = self.collect_white_dict()
+
+        self.grey_list = []
+        gdb.write("Searching for leaked memory, please wait a moment\n")
+        last = time.time()
+
+        sorted_keys = sorted(white_dict.keys())
+        for ptr in self.next_ptr():
+            # Find a closest addres in white_dict
+            pos = bisect.bisect_right(sorted_keys, ptr)
+            if pos == 0:
+                continue
+            grey_key = sorted_keys[pos - 1]
+            if grey_key in white_dict and ptr < grey_key + white_dict[grey_key]["size"]:
+                self.grey_list.append(white_dict[grey_key])
+                del white_dict[grey_key]
+
+        # All white node is leak
+
+        gdb.write(f"Search all memory use {(time.time() - last):.2f} seconds\n")
+
+        gdb.write("\n")
+        if len(white_dict) == 0:
+            gdb.write("All node have references, no memory leak!\n")

Review Comment:
   ```suggestion
               gdb.write("All nodes have references, no memory leak!\n")
   ```
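
On the scan itself: `next_ptr` is a conservative mark pass, treating every word in globals (and in already-referenced allocations) that lands inside a heap region as a live pointer. The same idea in a self-contained form, over plain bytes and independent of GDB:

```python
import struct

def scan_words(mem, regions, wordsize=4):
    """Yield every aligned word in mem (bytes) that falls in a region."""
    fmt = "<I" if wordsize == 4 else "<Q"
    for off in range(0, len(mem) - wordsize + 1, wordsize):
        (ptr,) = struct.unpack_from(fmt, mem, off)
        if any(start <= ptr < end for start, end in regions):
            yield ptr

# Two candidate words; only 0x20001000 lies inside the heap region.
mem = struct.pack("<II", 0x20001000, 0xDEADBEEF)
assert list(scan_words(mem, [(0x20000000, 0x20008000)])) == [0x20001000]
```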



##########
tools/gdb/utils.py:
##########
@@ -183,8 +363,88 @@ def is_target_smp():
         return False
 
 
-def get_symbol_value(name):
-    """Return the value of a symbol value etc: Variable, Marco"""
+# FIXME: support RISC-V/X86/ARM64 etc.
+def in_interrupt_context(cpuid=0):
+    frame = gdb.selected_frame()
+
+    if is_target_arch("arm"):
+        xpsr = int(frame.read_register("xpsr"))
+        return xpsr & 0xF
+    else:
+        # TODO: figure out a more proper way to detect if
+        # we are in an interrupt context
+        g_current_regs = gdb_eval_or_none("g_current_regs")
+        return not g_current_regs or not g_current_regs[cpuid]
+
+
+def get_arch_sp_name():
+    if is_target_arch("arm"):
+        # arm and arm variants
+        return "sp"
+    if is_target_arch("aarch64"):
+        return "sp"
+    elif is_target_arch("i386", exact=True):
+        return "esp"
+    elif is_target_arch("i386:x86-64", exact=True):
+        return "rsp"
+    else:
+        # Default to use sp, add more archs if needed
+        return "sp"
+
+
+def get_arch_pc_name():
+    if is_target_arch("arm"):
+        # arm and arm variants
+        return "pc"
+    if is_target_arch("aarch64"):
+        return "pc"
+    elif is_target_arch("i386", exact=True):
+        return "eip"
+    elif is_target_arch("i386:x86-64", exact=True):
+        return "rip"
+    else:
+        # Default to use pc, add more archs if needed
+        return "pc"
+
+
+def get_register_byname(regname, tcb=None):
+    frame = gdb.selected_frame()
+
+    # If no tcb is given then we can directly used the register from

Review Comment:
   ```suggestion
       # If no tcb is given then we can directly use the register from
   ```
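
A small sketch of how the helpers above pair with GDB's register API for the running thread (the tcb-restore path in the PR is more involved):

```python
import gdb

def get_sp_running():
    # read_register() accepts the architecture-specific name chosen above,
    # e.g. "sp" on ARM/AArch64, "esp" on i386, "rsp" on x86-64.
    frame = gdb.selected_frame()
    return int(frame.read_register(get_arch_sp_name()))
```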



##########
tools/gdb/macros.py:
##########
@@ -0,0 +1,214 @@
+############################################################################
+# tools/gdb/macros.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# NOTE: GDB stores macro information based on the current stack frame's scope,
+# including the source file and line number. Therefore, there may be missing
+# macro definitions when you are at different stack frames.
+#
+# To resolve this issue, we need to retrieve all macro information from the ELF file
+# then parse and evaluate it by ourselves.
+#
+# There might be two ways to achieve this, one is to leverage the C preprocessor
+# to directly preprocess all the macros instereted into python constants
+# gcc -E -x c -P <file_with_macros> -I/path/to/nuttx/include
+#
+# While the other way is to leverage the dwarf info stored in the ELF file,
+# with -g3 switch, we have a `.debug_macro` section containing all the information
+# about the macros.
+#
+# Currently, we using the second method.
+
+import os
+import re
+import subprocess
+import tempfile
+
+PUNCTUATORS = [
+    "\[",
+    "\]",
+    "\(",
+    "\)",
+    "\{",
+    "\}",
+    "\?",
+    ";",
+    ",",
+    "~",
+    "\.\.\.",
+    "\.",
+    "\-\>",
+    "\-\-",
+    "\-\=",
+    "\-",
+    "\+\+",
+    "\+\=",
+    "\+",
+    "\*\=",
+    "\*",
+    "\!\=",
+    "\!",
+    "\&\&",
+    "\&\=",
+    "\&",
+    "\/\=",
+    "\/",
+    "\%\>",
+    "%:%:",
+    "%:",
+    "%=",
+    "%",
+    "\^\=",
+    "\^",
+    "\#\#",
+    "\#",
+    "\:\>",
+    "\:",
+    "\|\|",
+    "\|\=",
+    "\|",
+    "<<=",
+    "<<",
+    "<=",
+    "<:",
+    "<%",
+    "<",
+    ">>=",
+    ">>",
+    ">=",
+    ">",
+    "\=\=",
+    "\=",
+]
+
+
+def parse_macro(line, macros, pattern):
+    # grep name, value
+    # the first group matches the token, the second matches the replacement
+    m = pattern.match(line)
+    if not m:
+        return False
+
+    name, value = m.group(1), m.group(2)
+
+    if name in macros:
+        # FIXME: what should we do if we got a redefinition/duplication here?
+        # for now I think it's ok just overwrite the old value
+        pass
+
+    # emplace, for all undefined macros we evalute it to zero
+    macros[name] = value if value else "0"
+
+    return True
+
+
+def fetch_macro_info(file):
+    if not os.path.isfile(file):
+        raise FileNotFoundError("No given ELF target found")
+
+    # FIXME: we don't use subprocess here because
+    # it's broken on some GDB distribution :(, I haven't
+    # found a solution to it.
+
+    with tempfile.NamedTemporaryFile(delete=False) as f1:
+
+        # # os.system(f"readelf -wm {file} > {output}")
+        process = subprocess.Popen(
+            f"readelf -wm {file}", shell=True, stdout=f1, stderr=subprocess.STDOUT
+        )
+
+        process.communicate()
+        errcode = process.returncode
+
+        f1.close()
+
+        if errcode != 0:
+            return {}
+
+        p = re.compile(".*macro[ ]*:[ ]*([\S]+\(.*?\)|[\w]+)[ ]*(.*)")
+        macros = {}
+
+        with open(f1.name, "rb") as f2:
+            for line in f2.readlines():
+                line = line.decode("utf-8")
+                if not line.startswith(" DW_MACRO_define") and not line.startswith(
+                    " DW_MACRO_undef"
+                ):
+                    continue
+
+                if not parse_macro(line, macros, p):
+                    print(f"Failed to parse {line}")
+
+    return macros
+
+
+def split_tokens(expr):
+    p = "(" + "|".join(PUNCTUATORS) + ")"
+    res = list(
+        filter(lambda e: e != "", map(lambda e: e.rstrip().lstrip(), re.split(p, expr)))
+    )
+    return res
+
+
+def do_expand(expr, macro_map):
+    if expr in PUNCTUATORS:
+        return expr
+
+    tokens = split_tokens(expr)
+
+    res = []
+
+    for t in tokens:
+        if t not in macro_map:
+            res.append(t)
+            continue
+        res += do_expand(macro_map[t], macro_map)
+
+    return res
+
+
+# NOTE: Implement a fully functional parser which can
+# preprocessing all the C marcos according to ISO 9899 standard
+# may be an overkill, what we really care about are those
+# macros that can be evaluted to an constant value.

Review Comment:
   ```suggestion
   # macros that can be evaluated to a constant value.
   ```
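
A quick demonstration of the expansion this NOTE describes, assuming tools/gdb is on sys.path so macros.py is importable; the joined output matches the "(1 + 2 + 3)" example in the comment:

```python
from macros import do_expand  # assumes tools/gdb is on sys.path

macro_map = {"A": "(B + C + D)", "B": "1", "C": "2", "D": "3"}

# do_expand() returns the fully substituted token list for object-like macros.
tokens = do_expand("A", macro_map)
print(" ".join(tokens))  # -> ( 1 + 2 + 3 )
```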



##########
tools/gdb/memdump.py:
##########
@@ -278,35 +547,463 @@ def parse_arguments(self, argv):
         parser.add_argument("-x", "--max", type=str, help="Maximum value")
         parser.add_argument("--used", action="store_true", help="Used flag")
         parser.add_argument("--free", action="store_true", help="Free flag")
-        args = parser.parse_args(args=(None if len(argv) == 1 else argv))
+        parser.add_argument("--biggest", action="store_true", help="biggest allocated")
+        parser.add_argument("--top", type=str, help="biggest top n, default 30")
+        parser.add_argument(
+            "--orphan", action="store_true", help="orphan allocated(neighbor of free)"
+        )
+        parser.add_argument(
+            "-d",
+            "--detail",
+            action="store_true",
+            help="Output details of each node",
+            default=False,
+        )
+        parser.add_argument(
+            "-s",
+            "--simple",
+            action="store_true",
+            help="Simplified Output",
+            default=False,
+        )
+
+        if argv[0] == "":
+            argv = None
+        try:
+            args = parser.parse_args(argv)
+        except SystemExit:
+            return None
+
         return {
             "pid": int(args.pid, 0) if args.pid else None,
             "seqmin": int(args.min, 0) if args.min else 0,
             "seqmax": int(args.max, 0) if args.max else 0xFFFFFFFF,
             "used": args.used,
             "free": args.free,
             "addr": int(args.addr, 0) if args.addr else None,
+            "simple": args.simple,
+            "detail": args.detail,
+            "biggest": args.biggest,
+            "orphan": args.orphan,
+            "top": int(args.top) if args.top else 30,
         }
 
     def invoke(self, args, from_tty):
-        if gdb.lookup_type("size_t").sizeof == 4:
+        if sizeof_size_t == 4:
             self.align = 11
         else:
             self.align = 19
 
         arg = self.parse_arguments(args.split(" "))
 
+        if arg is None:
+            return
+
         pid = PID_MM_ALLOC
         if arg["used"]:
             pid = PID_MM_ALLOC
         elif arg["free"]:
-            pid = PID_MM_LEAK
+            pid = PID_MM_FREE
+        elif arg["biggest"]:
+            pid = PID_MM_BIGGEST
+        elif arg["orphan"]:
+            pid = PID_MM_ORPHAN
         elif arg["pid"]:
             pid = arg["pid"]
+        if CONFIG_MM_BACKTRACE <= 0:
+            arg["detail"] = True
 
         self.aordblks = 0
         self.uordblks = 0
-        self.memdump(pid, arg["seqmin"], arg["seqmax"], arg["addr"])
+        self.backtrace_dict = {}
+        self.npidhash = gdb.parse_and_eval("g_npidhash")
+        self.pidhash = gdb.parse_and_eval("g_pidhash")
+        self.memdump(
+            pid,
+            arg["seqmin"],
+            arg["seqmax"],
+            arg["addr"],
+            arg["simple"],
+            arg["detail"],
+            arg["top"],
+        )
+
+
+class Memleak(gdb.Command):
+    """Memleak check"""
+
+    def __init__(self):
+        super(Memleak, self).__init__("memleak", gdb.COMMAND_USER)
+
+    def check_alive(self, pid):
+        return self.pidhash[pid & self.npidhash - 1] != 0
+
+    def next_ptr(self):
+        inf = gdb.selected_inferior()
+        heap = gdb.parse_and_eval("g_mmheap")
+        longsize = get_long_type().sizeof
+        region = get_symbol_value("CONFIG_MM_REGIONS")
+        regions = []
+
+        for i in range(0, region):
+            start = int(heap["mm_heapstart"][i])
+            end = int(heap["mm_heapend"][i])
+            regions.append({"start": start, "end": end})
+
+        # Search global variables
+        for objfile in gdb.objfiles():
+            gdb.write(f"Searching global symbol in: {objfile.filename}\n")
+            elf = self.elf.load_from_path(objfile.filename)
+            symtab = elf.get_section_by_name(".symtab")
+            for symbol in symtab.iter_symbols():
+                if symbol["st_info"]["type"] != "STT_OBJECT":
+                    continue
+
+                if symbol["st_size"] < longsize:
+                    continue
+
+                global_size = symbol["st_size"] // longsize * longsize
+                global_mem = inf.read_memory(symbol["st_value"], global_size)
+                while global_size:
+                    global_size = global_size - longsize
+                    ptr = read_ulong(global_mem, global_size)
+                    for region in regions:
+                        if ptr >= region["start"] and ptr < region["end"]:
+                            yield ptr
+                            break
+
+        gdb.write("Searching in grey memory\n")
+        for node in self.grey_list:
+            addr = node["addr"]
+            mem = inf.read_memory(addr, node["size"])
+            i = 0
+            while i < node["size"]:
+                ptr = read_ulong(mem, i)
+                for region in regions:
+                    if ptr >= region["start"] and ptr < region["end"]:
+                        yield ptr
+                        break
+                i = i + longsize
+
+    def collect_white_dict(self):
+        white_dict = {}
+        allocnode_size = mm_allocnode_type.sizeof
+
+        # collect all user malloc ptr
+
+        heap = gdb.parse_and_eval("g_mmheap")
+        for node in mm_foreach(heap):
+            if node["size"] & MM_ALLOC_BIT != 0 and node["pid"] != PID_MM_MEMPOOL:
+                addr = int(node) + allocnode_size
+
+                node_dict = {}
+                node_dict["node"] = node
+                node_dict["size"] = mm_nodesize(node["size"]) - allocnode_size
+                node_dict["addr"] = addr
+                white_dict[int(addr)] = node_dict
+
+        if heap.type.has_key("mm_mpool"):
+            for pool in mempool_multiple_foreach(heap["mm_mpool"]):
+                for buf in mempool_foreach(pool):
+                    if buf["magic"] == MEMPOOL_MAGIC_ALLOC:
+                        addr = int(buf) - pool["blocksize"]
+
+                        buf_dict = {}
+                        buf_dict["node"] = buf
+                        buf_dict["size"] = pool["blocksize"]
+                        buf_dict["addr"] = addr
+                        white_dict[int(addr)] = buf_dict
+
+        return white_dict
+
+    def parse_arguments(self, argv):
+        parser = argparse.ArgumentParser(description="memleak command")
+        parser.add_argument(
+            "-s",
+            "--simple",
+            action="store_true",
+            help="Simplified Output",
+            default=False,
+        )
+        parser.add_argument(
+            "-d",
+            "--detail",
+            action="store_true",
+            help="Output details of each node",
+            default=False,
+        )
+
+        if argv[0] == "":
+            argv = None
+        try:
+            args = parser.parse_args(argv)
+        except SystemExit:
+            return None
+
+        return {"simple": args.simple, "detail": args.detail}
+
+    def invoke(self, args, from_tty):
+        self.elf = utils.import_check(
+            "elftools.elf.elffile", "ELFFile", "Plase pip install pyelftools\n"
+        )
+        if not self.elf:
+            return
+
+        if sizeof_size_t == 4:
+            align = 11
+        else:
+            align = 19
+
+        arg = self.parse_arguments(args.split(" "))
+
+        if arg is None:
+            return
+
+        if CONFIG_MM_BACKTRACE < 0:
+            gdb.write("Need to set CONFIG_MM_BACKTRACE to 8 or 16 better.\n")
+            return
+        elif CONFIG_MM_BACKTRACE == 0:
+            gdb.write("CONFIG_MM_BACKTRACE is 0, no backtrace available\n")
+
+        start = last = time.time()
+        white_dict = self.collect_white_dict()
+
+        self.grey_list = []
+        gdb.write("Searching for leaked memory, please wait a moment\n")
+        last = time.time()
+
+        sorted_keys = sorted(white_dict.keys())
+        for ptr in self.next_ptr():
+            # Find a closest addres in white_dict
+            pos = bisect.bisect_right(sorted_keys, ptr)
+            if pos == 0:
+                continue
+            grey_key = sorted_keys[pos - 1]
+            if grey_key in white_dict and ptr < grey_key + white_dict[grey_key]["size"]:
+                self.grey_list.append(white_dict[grey_key])
+                del white_dict[grey_key]
+
+        # All white node is leak
+
+        gdb.write(f"Search all memory use {(time.time() - last):.2f} seconds\n")
+
+        gdb.write("\n")
+        if len(white_dict) == 0:
+            gdb.write("All node have references, no memory leak!\n")
+            return
+
+        gdb.write("Leak catch!, use '\x1b[33;1m*\x1b[m' mark pid is not exist:\n")

Review Comment:
   ```suggestion
            gdb.write("Leak catch!, use '\x1b[33;1m*\x1b[m' mark pid does not exist:\n")
   ```
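
The bisect_right lookup used above generalizes to any "which allocation contains this pointer" query over sorted base addresses; a standalone sketch:

```python
import bisect

def find_alloc(bases, sizes, ptr):
    """bases is sorted ascending; sizes[i] is the byte size of the block
    at bases[i]. Return the index of the containing block, or -1."""
    pos = bisect.bisect_right(bases, ptr)
    if pos == 0:
        return -1
    i = pos - 1
    return i if ptr < bases[i] + sizes[i] else -1

bases, sizes = [0x1000, 0x2000, 0x3000], [0x100, 0x80, 0x40]
assert find_alloc(bases, sizes, 0x2010) == 1   # inside the second block
assert find_alloc(bases, sizes, 0x2100) == -1  # falls in a gap
```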



##########
tools/gdb/macros.py:
##########
@@ -0,0 +1,214 @@
+############################################################################
+# tools/gdb/macros.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# NOTE: GDB stores macro information based on the current stack frame's scope,
+# including the source file and line number. Therefore, there may be missing
+# macro definitions when you are at different stack frames.
+#
+# To resolve this issue, we need to retrieve all macro information from the ELF file
+# then parse and evaluate it by ourselves.
+#
+# There might be two ways to achieve this, one is to leverage the C preprocessor
+# to directly preprocess all the macros instereted into python constants
+# gcc -E -x c -P <file_with_macros> -I/path/to/nuttx/include
+#
+# While the other way is to leverage the dwarf info stored in the ELF file,
+# with -g3 switch, we have a `.debug_macro` section containing all the information
+# about the macros.
+#
+# Currently, we using the second method.
+
+import os
+import re
+import subprocess
+import tempfile
+
+PUNCTUATORS = [
+    "\[",
+    "\]",
+    "\(",
+    "\)",
+    "\{",
+    "\}",
+    "\?",
+    ";",
+    ",",
+    "~",
+    "\.\.\.",
+    "\.",
+    "\-\>",
+    "\-\-",
+    "\-\=",
+    "\-",
+    "\+\+",
+    "\+\=",
+    "\+",
+    "\*\=",
+    "\*",
+    "\!\=",
+    "\!",
+    "\&\&",
+    "\&\=",
+    "\&",
+    "\/\=",
+    "\/",
+    "\%\>",
+    "%:%:",
+    "%:",
+    "%=",
+    "%",
+    "\^\=",
+    "\^",
+    "\#\#",
+    "\#",
+    "\:\>",
+    "\:",
+    "\|\|",
+    "\|\=",
+    "\|",
+    "<<=",
+    "<<",
+    "<=",
+    "<:",
+    "<%",
+    "<",
+    ">>=",
+    ">>",
+    ">=",
+    ">",
+    "\=\=",
+    "\=",
+]
+
+
+def parse_macro(line, macros, pattern):
+    # grep name, value
+    # the first group matches the token, the second matches the replacement
+    m = pattern.match(line)
+    if not m:
+        return False
+
+    name, value = m.group(1), m.group(2)
+
+    if name in macros:
+        # FIXME: what should we do if we got a redefinition/duplication here?
+        # for now I think it's ok just overwrite the old value
+        pass
+
+    # emplace, for all undefined macros we evalute it to zero
+    macros[name] = value if value else "0"
+
+    return True
+
+
+def fetch_macro_info(file):
+    if not os.path.isfile(file):
+        raise FileNotFoundError("No given ELF target found")
+
+    # FIXME: we don't use subprocess here because
+    # it's broken on some GDB distribution :(, I haven't
+    # found a solution to it.
+
+    with tempfile.NamedTemporaryFile(delete=False) as f1:
+
+        # # os.system(f"readelf -wm {file} > {output}")
+        process = subprocess.Popen(
+            f"readelf -wm {file}", shell=True, stdout=f1, stderr=subprocess.STDOUT
+        )
+
+        process.communicate()
+        errcode = process.returncode
+
+        f1.close()
+
+        if errcode != 0:
+            return {}
+
+        p = re.compile(".*macro[ ]*:[ ]*([\S]+\(.*?\)|[\w]+)[ ]*(.*)")
+        macros = {}
+
+        with open(f1.name, "rb") as f2:
+            for line in f2.readlines():
+                line = line.decode("utf-8")
+                if not line.startswith(" DW_MACRO_define") and not line.startswith(
+                    " DW_MACRO_undef"
+                ):
+                    continue
+
+                if not parse_macro(line, macros, p):
+                    print(f"Failed to parse {line}")
+
+    return macros
+
+
+def split_tokens(expr):
+    p = "(" + "|".join(PUNCTUATORS) + ")"
+    res = list(
+        filter(lambda e: e != "", map(lambda e: e.rstrip().lstrip(), re.split(p, expr)))
+    )
+    return res
+
+
+def do_expand(expr, macro_map):
+    if expr in PUNCTUATORS:
+        return expr
+
+    tokens = split_tokens(expr)
+
+    res = []
+
+    for t in tokens:
+        if t not in macro_map:
+            res.append(t)
+            continue
+        res += do_expand(macro_map[t], macro_map)
+
+    return res
+
+
+# NOTE: Implement a fully functional parser which can
+# preprocessing all the C marcos according to ISO 9899 standard

Review Comment:
   ```suggestion
   # preprocess all the C macros according to ISO 9899 standard
   ```



##########
tools/gdb/macros.py:
##########
@@ -0,0 +1,214 @@
+############################################################################
+# tools/gdb/macros.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# NOTE: GDB stores macro information based on the current stack frame's scope,
+# including the source file and line number. Therefore, there may be missing
+# macro definitions when you are at different stack frames.
+#
+# To resolve this issue, we need to retrieve all macro information from the ELF file
+# then parse and evaluate it by ourselves.
+#
+# There might be two ways to achieve this, one is to leverage the C preprocessor
+# to directly preprocess all the macros instereted into python constants

Review Comment:
   ```suggestion
   # to directly preprocess all the macros interpreted into python constants
   ```
   
   Alternatively, did you mean "inserted" rather than "interpreted"?
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
