https://github.com/zeyi2 updated https://github.com/llvm/llvm-project/pull/166072

>From 7b4e4172fc3cea8e8194aa3544f2c2ec30e3616a Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Sun, 2 Nov 2025 22:56:53 +0800
Subject: [PATCH 01/12] [clang-tidy][docs] Implement alphabetical order check

---
 .../clang-tidy-alphabetical-order-check.py    | 301 ++++++++++++++++++
 .../infrastructure/alphabetical-order.cpp     |   6 +
 clang-tools-extra/test/lit.cfg.py             |   1 +
 3 files changed, 308 insertions(+)
 create mode 100644 
clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
 create mode 100644 
clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
new file mode 100644
index 0000000000000..321663bb7d577
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -0,0 +1,301 @@
+#!/usr/bin/env python3
+
+"""
+Normalize clang-tidy docs with deterministic sorting for linting/tests.
+
+Subcommands:
+  - checks-list: Sort entries in docs/clang-tidy/checks/list.rst csv-table.
+  - release-notes: Sort key sections in docs/ReleaseNotes.rst and de-duplicate
+                   entries in "Changes in existing checks".
+
+Usage:
+  clang-tidy-alphabetical-order-check.py <subcommand> [-i <input rst>] [-o <output rst>] [--fix]
+
+Flags:
+  -i/--input   Input file.
+  -o/--output  Write normalized content here; omit to write to stdout.
+  --fix        Rewrite the input file in place. Cannot be combined with -o/--output.
+"""
+
+import argparse
+import io
+import os
+import re
+import sys
+from typing import List, Optional, Sequence, Tuple
+
+DOC_LABEL_RN_RE = re.compile(r":doc:`(?P<label>[^`<]+)\s*(?:<[^>]+>)?`")
+DOC_LINE_RE = re.compile(r"^\s*:doc:`(?P<label>[^`<]+?)\s*<[^>]+>`.*$")
+
+
+def script_dir() -> str:
+    return os.path.dirname(os.path.abspath(__file__))
+
+
+def read_text(path: str) -> List[str]:
+    with io.open(path, "r", encoding="utf-8") as f:
+        return f.read().splitlines(True)
+
+
+def write_text(path: str, content: str) -> None:
+    with io.open(path, "w", encoding="utf-8", newline="") as f:
+        f.write(content)
+
+
+def normalize_list_rst(lines: List[str]) -> str:
+    out: List[str] = []
+    i = 0
+    n = len(lines)
+    while i < n:
+        out.append(lines[i])
+        if lines[i].lstrip().startswith(".. csv-table::"):
+            i += 1
+            break
+        i += 1
+
+    while i < n and (lines[i].startswith(" ") or lines[i].strip() == ""):
+        if DOC_LINE_RE.match(lines[i]):
+            break
+        out.append(lines[i])
+        i += 1
+
+    entries: List[str] = []
+    while i < n and lines[i].startswith(" "):
+        if DOC_LINE_RE.match(lines[i]):
+            entries.append(lines[i])
+        else:
+            entries.append(lines[i])
+        i += 1
+
+    def key_for(line: str):
+        m = DOC_LINE_RE.match(line)
+        if not m:
+            return (1, "")
+        return (0, m.group("label"))
+
+    entries_sorted = sorted(entries, key=key_for)
+    out.extend(entries_sorted)
+    out.extend(lines[i:])
+
+    return "".join(out)
+
+
+def run_checks_list(
+    inp: Optional[str], out_path: Optional[str], fix: bool
+) -> int:
+    if not inp:
+        inp = os.path.normpath(
+            os.path.join(
+                script_dir(),
+                "..",
+                "..",
+                "docs",
+                "clang-tidy",
+                "checks",
+                "list.rst",
+            )
+        )
+    lines = read_text(inp)
+    normalized = normalize_list_rst(lines)
+    if fix and out_path:
+        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
+        return 2
+    if fix:
+        original = "".join(lines)
+        if original != normalized:
+            write_text(inp, normalized)
+        return 0
+    if out_path:
+        write_text(out_path, normalized)
+        return 0
+    sys.stdout.write(normalized)
+    return 0
+
+
+def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
+    for i in range(len(lines) - 1):
+        if lines[i].rstrip("\n") == title:
+            underline = lines[i + 1].rstrip("\n")
+            if (
+                underline
+                and set(underline) == {"^"}
+                and len(underline) >= len(title)
+            ):
+                return i
+    return None
+
+
+def extract_label(text: str) -> str:
+    m = DOC_LABEL_RN_RE.search(text)
+    return m.group("label") if m else text
+
+
+def is_bullet_start(line: str) -> bool:
+    return line.startswith("- ")
+
+
+def collect_bullet_blocks(
+    lines: Sequence[str], start: int, end: int
+) -> Tuple[List[str], List[Tuple[str, List[str]]], List[str]]:
+    i = start
+    n = end
+    first_bullet = i
+    while first_bullet < n and not is_bullet_start(lines[first_bullet]):
+        first_bullet += 1
+    prefix = list(lines[i:first_bullet])
+
+    blocks: List[Tuple[str, List[str]]] = []
+    i = first_bullet
+    while i < n:
+        if not is_bullet_start(lines[i]):
+            break
+        bstart = i
+        i += 1
+        while i < n and not is_bullet_start(lines[i]):
+            if (
+                i + 1 < n
+                and set(lines[i + 1].rstrip("\n")) == {"^"}
+                and lines[i].strip()
+            ):
+                break
+            i += 1
+        block = list(lines[bstart:i])
+        key = extract_label(block[0])
+        blocks.append((key, block))
+
+    suffix = list(lines[i:n])
+    return prefix, blocks, suffix
+
+
+def sort_and_dedup_blocks(
+    blocks: List[Tuple[str, List[str]]], dedup: bool = False
+) -> List[List[str]]:
+    seen = set()
+    filtered: List[Tuple[str, List[str]]] = []
+    for key, block in blocks:
+        if dedup:
+            if key in seen:
+                continue
+            seen.add(key)
+        filtered.append((key, block))
+    filtered.sort(key=lambda kb: kb[0])
+    return [b for _, b in filtered]
+
+
+def normalize_release_notes(lines: List[str]) -> str:
+    sections = [
+        ("New checks", False),
+        ("New check aliases", False),
+        ("Changes in existing checks", True),
+    ]
+
+    out = list(lines)
+
+    for idx in range(len(sections) - 1, -1, -1):
+        title, dedup = sections[idx]
+        h_start = find_heading(out, title)
+
+        if h_start is None:
+            continue
+
+        sec_start = h_start + 2
+
+        if idx + 1 < len(sections):
+            next_title = sections[idx + 1][0]
+            h_end = find_heading(out, next_title)
+            if h_end is None:
+                h_end = sec_start
+                while h_end + 1 < len(out):
+                    if out[h_end].strip() and set(
+                        out[h_end + 1].rstrip("\n")
+                    ) == {"^"}:
+                        break
+                    h_end += 1
+            sec_end = h_end
+        else:
+            h_end = sec_start
+            while h_end + 1 < len(out):
+                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {
+                    "^"
+                }:
+                    break
+                h_end += 1
+            sec_end = h_end
+
+        prefix, blocks, suffix = collect_bullet_blocks(out, sec_start, sec_end)
+        sorted_blocks = sort_and_dedup_blocks(blocks, dedup=dedup)
+
+        new_section: List[str] = []
+        new_section.extend(prefix)
+        for i_b, b in enumerate(sorted_blocks):
+            if i_b > 0 and (
+                not new_section
+                or (new_section and new_section[-1].strip() != "")
+            ):
+                new_section.append("\n")
+            new_section.extend(b)
+        new_section.extend(suffix)
+
+        out = out[:sec_start] + new_section + out[sec_end:]
+
+    return "".join(out)
+
+
+def run_release_notes(
+    inp: Optional[str], out_path: Optional[str], fix: bool
+) -> int:
+    if not inp:
+        inp = os.path.normpath(
+            os.path.join(script_dir(), "..", "..", "docs", "ReleaseNotes.rst")
+        )
+    lines = read_text(inp)
+    normalized = normalize_release_notes(lines)
+    if fix and out_path:
+        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
+        return 2
+    if fix:
+        original = "".join(lines)
+        if original != normalized:
+            write_text(inp, normalized)
+        return 0
+    if out_path:
+        write_text(out_path, normalized)
+        return 0
+    sys.stdout.write(normalized)
+    return 0
+
+
+def main(argv: List[str]) -> int:
+    ap = argparse.ArgumentParser()
+    sub = ap.add_subparsers(dest="cmd", required=True)
+
+    ap_checks = sub.add_parser(
+        "checks-list", help="normalize clang-tidy checks list.rst"
+    )
+    ap_checks.add_argument("-i", "--input", dest="inp", default=None)
+    ap_checks.add_argument("-o", "--output", dest="out", default=None)
+    ap_checks.add_argument(
+        "--fix", action="store_true", help="rewrite the input file in place"
+    )
+
+    ap_rn = sub.add_parser(
+        "release-notes", help="normalize ReleaseNotes.rst sections"
+    )
+    ap_rn.add_argument("-i", "--input", dest="inp", default=None)
+    ap_rn.add_argument("-o", "--output", dest="out", default=None)
+    ap_rn.add_argument(
+        "--fix", action="store_true", help="rewrite the input file in place"
+    )
+
+    args = ap.parse_args(argv)
+
+    if args.cmd == "checks-list":
+        return run_checks_list(args.inp, args.out, args.fix)
+    if args.cmd == "release-notes":
+        return run_release_notes(args.inp, args.out, args.fix)
+
+    ap.error("unknown command")
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git 
a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp 
b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
new file mode 100644
index 0000000000000..4a2598b93942b
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
@@ -0,0 +1,6 @@
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst -o %t.list
+// RUN: diff --strip-trailing-cr %t.list \
+// RUN:   %S/../../../docs/clang-tidy/checks/list.rst
+
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst -o %t.rn
+// RUN: diff --strip-trailing-cr %t.rn %S/../../../docs/ReleaseNotes.rst
diff --git a/clang-tools-extra/test/lit.cfg.py 
b/clang-tools-extra/test/lit.cfg.py
index c1da37d61bd61..c39ea29329674 100644
--- a/clang-tools-extra/test/lit.cfg.py
+++ b/clang-tools-extra/test/lit.cfg.py
@@ -57,6 +57,7 @@
 if config.clang_tidy_custom_check:
     config.available_features.add("custom-check")
 python_exec = shlex.quote(config.python_executable)
+config.substitutions.append(("%python", python_exec))
 check_clang_tidy = os.path.join(
     config.test_source_root, "clang-tidy", "check_clang_tidy.py"
 )

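As a reference for reviewers, here is a minimal usage sketch of the subcommand interface introduced in patch 01 (checks-list/release-notes with -i, -o, and --fix); later patches rework this interface. The sketch drives the script from Python via subprocess, and the repository-relative paths are only illustrative of a checkout root:

# Sketch: exercise the two subcommands added in patch 01. Paths assume the
# working directory is the root of an LLVM checkout.
import subprocess
import sys

script = "clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py"

# Print the normalized checks list to stdout (the default when -o/--fix are omitted).
normalized = subprocess.run(
    [sys.executable, script, "checks-list",
     "-i", "clang-tools-extra/docs/clang-tidy/checks/list.rst"],
    capture_output=True, text=True, check=True,
).stdout
print(len(normalized), "characters of normalized list.rst")

# Rewrite ReleaseNotes.rst in place; --fix cannot be combined with -o/--output.
subprocess.run([sys.executable, script, "release-notes", "--fix"], check=True)
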
>From c08b734ec6337afb2fbfb45fb7574ea8cf82add1 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Sun, 2 Nov 2025 23:25:39 +0800
Subject: [PATCH 02/12] fix format

---
 .../clang-tidy-alphabetical-order-check.py    | 41 +++++++++----------
 .../infrastructure/alphabetical-order.cpp     |  7 +---
 2 files changed, 21 insertions(+), 27 deletions(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 321663bb7d577..680f21ec0e02c 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -1,6 +1,18 @@
 #!/usr/bin/env python3
+#
+# ===-----------------------------------------------------------------------===#
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ===-----------------------------------------------------------------------===#
 
 """
+
+ClangTidy Alphabetical Order Checker
+====================================
+
 Normalize clang-tidy docs with deterministic sorting for linting/tests.
 
 Subcommands:
@@ -80,9 +92,7 @@ def key_for(line: str):
     return "".join(out)
 
 
-def run_checks_list(
-    inp: Optional[str], out_path: Optional[str], fix: bool
-) -> int:
+def run_checks_list(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
     if not inp:
         inp = os.path.normpath(
             os.path.join(
@@ -116,11 +126,7 @@ def find_heading(lines: Sequence[str], title: str) -> 
Optional[int]:
     for i in range(len(lines) - 1):
         if lines[i].rstrip("\n") == title:
             underline = lines[i + 1].rstrip("\n")
-            if (
-                underline
-                and set(underline) == {"^"}
-                and len(underline) >= len(title)
-            ):
+            if underline and set(underline) == {"^"} and len(underline) >= len(title):
                 return i
     return None
 
@@ -206,18 +212,14 @@ def normalize_release_notes(lines: List[str]) -> str:
             if h_end is None:
                 h_end = sec_start
                 while h_end + 1 < len(out):
-                    if out[h_end].strip() and set(
-                        out[h_end + 1].rstrip("\n")
-                    ) == {"^"}:
+                    if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
                         break
                     h_end += 1
             sec_end = h_end
         else:
             h_end = sec_start
             while h_end + 1 < len(out):
-                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {
-                    "^"
-                }:
+                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
                     break
                 h_end += 1
             sec_end = h_end
@@ -229,8 +231,7 @@ def normalize_release_notes(lines: List[str]) -> str:
         new_section.extend(prefix)
         for i_b, b in enumerate(sorted_blocks):
             if i_b > 0 and (
-                not new_section
-                or (new_section and new_section[-1].strip() != "")
+                    not new_section or (new_section and new_section[-1].strip() != "")
             ):
                 new_section.append("\n")
             new_section.extend(b)
@@ -241,9 +242,7 @@ def normalize_release_notes(lines: List[str]) -> str:
     return "".join(out)
 
 
-def run_release_notes(
-    inp: Optional[str], out_path: Optional[str], fix: bool
-) -> int:
+def run_release_notes(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
     if not inp:
         inp = os.path.normpath(
             os.path.join(script_dir(), "..", "..", "docs", "ReleaseNotes.rst")
@@ -278,9 +277,7 @@ def main(argv: List[str]) -> int:
         "--fix", action="store_true", help="rewrite the input file in place"
     )
 
-    ap_rn = sub.add_parser(
-        "release-notes", help="normalize ReleaseNotes.rst sections"
-    )
+    ap_rn = sub.add_parser("release-notes", help="normalize ReleaseNotes.rst sections")
     ap_rn.add_argument("-i", "--input", dest="inp", default=None)
     ap_rn.add_argument("-o", "--output", dest="out", default=None)
     ap_rn.add_argument(
diff --git 
a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp 
b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
index 4a2598b93942b..0ac1484a00561 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
@@ -1,6 +1,3 @@
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst -o %t.list
-// RUN: diff --strip-trailing-cr %t.list \
-// RUN:   %S/../../../docs/clang-tidy/checks/list.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst | diff --strip-trailing-cr - %S/../../../docs/clang-tidy/checks/list.rst
 
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst -o %t.rn
-// RUN: diff --strip-trailing-cr %t.rn %S/../../../docs/ReleaseNotes.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst | diff --strip-trailing-cr - %S/../../../docs/ReleaseNotes.rst

>From 20b66111f4ba000a5cbcba45b62674e42a4f1f1e Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Sun, 2 Nov 2025 23:28:13 +0800
Subject: [PATCH 03/12] ~

---
 .../clang-tidy/tool/clang-tidy-alphabetical-order-check.py      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 680f21ec0e02c..fbb55efa536ff 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -231,7 +231,7 @@ def normalize_release_notes(lines: List[str]) -> str:
         new_section.extend(prefix)
         for i_b, b in enumerate(sorted_blocks):
             if i_b > 0 and (
-                    not new_section or (new_section and new_section[-1].strip() != "")
+                not new_section or (new_section and new_section[-1].strip() != "")
             ):
                 new_section.append("\n")
             new_section.extend(b)

>From c7dc5e99207804473cdee184bc8b435874fbf5c4 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Mon, 3 Nov 2025 11:36:52 +0800
Subject: [PATCH 04/12] refactor

---
 .../clang-tidy-alphabetical-order-check.py    | 259 +++++++++---------
 .../infrastructure/alphabetical-order.cpp     |   6 +-
 2 files changed, 126 insertions(+), 139 deletions(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index fbb55efa536ff..58d93dcf31235 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -15,18 +15,13 @@
 
 Normalize clang-tidy docs with deterministic sorting for linting/tests.
 
-Subcommands:
-  - checks-list: Sort entries in docs/clang-tidy/checks/list.rst csv-table.
-  - release-notes: Sort key sections in docs/ReleaseNotes.rst and de-duplicate
-                   entries in "Changes in existing checks".
-
-Usage:
-  clang-tidy-alphabetical-order-check.py <subcommand> [-i <input rst>] [-o <output rst>] [--fix]
+Behavior:
+- Sort entries in docs/clang-tidy/checks/list.rst csv-table.
+- Sort key sections in docs/ReleaseNotes.rst. Does not remove duplicate
+  entries; developers should merge duplicates manually when needed.
 
 Flags:
-  -i/--input   Input file.
-  -o/--output  Write normalized content here; omit to write to stdout.
-  --fix        Rewrite the input file in place. Cannot be combined with -o/--output.
+  -o/--output  Write normalized content to this path instead of updating docs.
 """
 
 import argparse
@@ -36,7 +31,13 @@
 import sys
 from typing import List, Optional, Sequence, Tuple
 
+# Matches a :doc:`label <path>` or :doc:`label` reference anywhere in text and
+# captures the label. Used to sort bullet items alphabetically in ReleaseNotes
+# items by their label.
 DOC_LABEL_RN_RE = re.compile(r":doc:`(?P<label>[^`<]+)\s*(?:<[^>]+>)?`")
+
+# Matches a single csv-table row line in list.rst that begins with a :doc:
+# reference, capturing the label. Used to extract the sort key per row.
 DOC_LINE_RE = re.compile(r"^\s*:doc:`(?P<label>[^`<]+?)\s*<[^>]+>`.*$")
 
 
@@ -55,6 +56,12 @@ def write_text(path: str, content: str) -> None:
 
 
 def normalize_list_rst(lines: List[str]) -> str:
+    """Return normalized content of checks list.rst from given lines.
+
+    Input: full file content split into lines.
+    Output: single string with csv-table rows sorted by :doc: label while
+            preserving header/leading comments and trailing content.
+    """
     out: List[str] = []
     i = 0
     n = len(lines)
@@ -92,37 +99,15 @@ def key_for(line: str):
     return "".join(out)
 
 
-def run_checks_list(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
-    if not inp:
-        inp = os.path.normpath(
-            os.path.join(
-                script_dir(),
-                "..",
-                "..",
-                "docs",
-                "clang-tidy",
-                "checks",
-                "list.rst",
-            )
-        )
-    lines = read_text(inp)
-    normalized = normalize_list_rst(lines)
-    if fix and out_path:
-        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
-        return 2
-    if fix:
-        original = "".join(lines)
-        if original != normalized:
-            write_text(inp, normalized)
-        return 0
-    if out_path:
-        write_text(out_path, normalized)
-        return 0
-    sys.stdout.write(normalized)
-    return 0
+def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
+    """Find heading start index for a section underlined with ^ characters.
 
+    The function looks for a line equal to `title` followed by a line that
+    consists solely of ^, which matches the ReleaseNotes style for subsection
+    headings used here.
 
-def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
+    Returns index of the title line, or None if not found.
+    """
     for i in range(len(lines) - 1):
         if lines[i].rstrip("\n") == title:
             underline = lines[i + 1].rstrip("\n")
@@ -173,125 +158,125 @@ def collect_bullet_blocks(
     return prefix, blocks, suffix
 
 
-def sort_and_dedup_blocks(
-    blocks: List[Tuple[str, List[str]]], dedup: bool = False
-) -> List[List[str]]:
-    seen = set()
-    filtered: List[Tuple[str, List[str]]] = []
-    for key, block in blocks:
-        if dedup:
-            if key in seen:
-                continue
-            seen.add(key)
-        filtered.append((key, block))
-    filtered.sort(key=lambda kb: kb[0])
-    return [b for _, b in filtered]
+def sort_blocks(blocks: List[Tuple[str, List[str]]]) -> List[List[str]]:
+    """Return blocks sorted deterministically by their extracted label.
 
+    Duplicates are preserved; merging is left to authors to handle manually.
+    """
+    return [b for _, b in sorted(blocks, key=lambda kb: kb[0])]
 
-def normalize_release_notes(lines: List[str]) -> str:
-    sections = [
-        ("New checks", False),
-        ("New check aliases", False),
-        ("Changes in existing checks", True),
-    ]
 
-    out = list(lines)
+def _find_section_bounds(
+    lines: Sequence[str], title: str, next_title: Optional[str]
+) -> Optional[Tuple[int, int, int]]:
+    """Return (h_start, sec_start, sec_end) for section `title`.
 
-    for idx in range(len(sections) - 1, -1, -1):
-        title, dedup = sections[idx]
-        h_start = find_heading(out, title)
-
-        if h_start is None:
-            continue
-
-        sec_start = h_start + 2
-
-        if idx + 1 < len(sections):
-            next_title = sections[idx + 1][0]
-            h_end = find_heading(out, next_title)
-            if h_end is None:
-                h_end = sec_start
-                while h_end + 1 < len(out):
-                    if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
-                        break
-                    h_end += 1
-            sec_end = h_end
-        else:
+    - h_start: index of the section title line
+    - sec_start: index of the first content line after underline
+    - sec_end: index of the first line of the next section title (or end)
+    """
+    h_start = find_heading(lines, title)
+    if h_start is None:
+        return None
+
+    sec_start = h_start + 2
+
+    # Determine end of section either from next_title or by scanning.
+    if next_title is not None:
+        h_end = find_heading(lines, next_title)
+        if h_end is None:
+            # Scan forward to the next heading-like underline.
             h_end = sec_start
-            while h_end + 1 < len(out):
-                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
+            while h_end + 1 < len(lines):
+                if lines[h_end].strip() and set(lines[h_end + 1].rstrip("\n")) 
== {"^"}:
                     break
                 h_end += 1
-            sec_end = h_end
+        sec_end = h_end
+    else:
+        # Scan to end or until a heading underline is found.
+        h_end = sec_start
+        while h_end + 1 < len(lines):
+            if lines[h_end].strip() and set(lines[h_end + 1].rstrip("\n")) == {"^"}:
+                break
+            h_end += 1
+        sec_end = h_end
 
-        prefix, blocks, suffix = collect_bullet_blocks(out, sec_start, sec_end)
-        sorted_blocks = sort_and_dedup_blocks(blocks, dedup=dedup)
+    return h_start, sec_start, sec_end
 
-        new_section: List[str] = []
-        new_section.extend(prefix)
-        for i_b, b in enumerate(sorted_blocks):
-            if i_b > 0 and (
-                not new_section or (new_section and new_section[-1].strip() != "")
-            ):
-                new_section.append("\n")
-            new_section.extend(b)
-        new_section.extend(suffix)
 
-        out = out[:sec_start] + new_section + out[sec_end:]
+def _normalize_release_notes_section(
+    lines: List[str], title: str, next_title: Optional[str]
+) -> List[str]:
+    """Normalize a single release-notes section and return updated lines."""
+    bounds = _find_section_bounds(lines, title, next_title)
+    if bounds is None:
+        return lines
+    _, sec_start, sec_end = bounds
+
+    prefix, blocks, suffix = collect_bullet_blocks(lines, sec_start, sec_end)
+    sorted_blocks = sort_blocks(blocks)
+
+    new_section: List[str] = []
+    new_section.extend(prefix)
+    for i_b, b in enumerate(sorted_blocks):
+        if i_b > 0 and (not new_section or (new_section and new_section[-1].strip() != "")):
+            new_section.append("\n")
+        new_section.extend(b)
+    new_section.extend(suffix)
+
+    return lines[:sec_start] + new_section + lines[sec_end:]
+
+
+def normalize_release_notes(lines: List[str]) -> str:
+    sections = ["New checks", "New check aliases", "Changes in existing checks"]
+
+    out = list(lines)
+
+    for idx in range(len(sections) - 1, -1, -1):
+        title = sections[idx]
+        next_title = sections[idx + 1] if idx + 1 < len(sections) else None
+        out = _normalize_release_notes_section(out, title, next_title)
 
     return "".join(out)
 
 
-def run_release_notes(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
-    if not inp:
-        inp = os.path.normpath(
-            os.path.join(script_dir(), "..", "..", "docs", "ReleaseNotes.rst")
-        )
-    lines = read_text(inp)
-    normalized = normalize_release_notes(lines)
-    if fix and out_path:
-        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
-        return 2
-    if fix:
-        original = "".join(lines)
-        if original != normalized:
-            write_text(inp, normalized)
-        return 0
-    if out_path:
-        write_text(out_path, normalized)
-        return 0
-    sys.stdout.write(normalized)
-    return 0
+def _default_paths() -> Tuple[str, str]:
+    base = os.path.normpath(os.path.join(script_dir(), "..", ".."))
+    list_doc = os.path.join(base, "docs", "clang-tidy", "checks", "list.rst")
+    rn_doc = os.path.join(base, "docs", "ReleaseNotes.rst")
+    return list_doc, rn_doc
 
 
 def main(argv: List[str]) -> int:
     ap = argparse.ArgumentParser()
-    sub = ap.add_subparsers(dest="cmd", required=True)
-
-    ap_checks = sub.add_parser(
-        "checks-list", help="normalize clang-tidy checks list.rst"
-    )
-    ap_checks.add_argument("-i", "--input", dest="inp", default=None)
-    ap_checks.add_argument("-o", "--output", dest="out", default=None)
-    ap_checks.add_argument(
-        "--fix", action="store_true", help="rewrite the input file in place"
-    )
-
-    ap_rn = sub.add_parser("release-notes", help="normalize ReleaseNotes.rst sections")
-    ap_rn.add_argument("-i", "--input", dest="inp", default=None)
-    ap_rn.add_argument("-o", "--output", dest="out", default=None)
-    ap_rn.add_argument(
-        "--fix", action="store_true", help="rewrite the input file in place"
-    )
-
+    ap.add_argument("-o", "--output", dest="out", default=None)
     args = ap.parse_args(argv)
 
-    if args.cmd == "checks-list":
-        return run_checks_list(args.inp, args.out, args.fix)
-    if args.cmd == "release-notes":
-        return run_release_notes(args.inp, args.out, args.fix)
+    list_doc, rn_doc = _default_paths()
 
-    ap.error("unknown command")
+    if args.out:
+        out_path = args.out
+        out_lower = os.path.basename(out_path).lower()
+        if "release" in out_lower:
+            lines = read_text(rn_doc)
+            normalized = normalize_release_notes(lines)
+            write_text(out_path, normalized)
+            return 0
+        else:
+            lines = read_text(list_doc)
+            normalized = normalize_list_rst(lines)
+            write_text(out_path, normalized)
+            return 0
+
+    list_lines = read_text(list_doc)
+    rn_lines = read_text(rn_doc)
+    list_norm = normalize_list_rst(list_lines)
+    rn_norm = normalize_release_notes(rn_lines)
+    if "".join(list_lines) != list_norm:
+        write_text(list_doc, list_norm)
+    if "".join(rn_lines) != rn_norm:
+        write_text(rn_doc, rn_norm)
+    return 0
 
 
 if __name__ == "__main__":
diff --git 
a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp 
b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
index 0ac1484a00561..c238884007595 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
@@ -1,3 +1,5 @@
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst | diff --strip-trailing-cr - %S/../../../docs/clang-tidy/checks/list.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py -o %t.clang-tidy-checks-list.rst
+// RUN: diff --strip-trailing-cr %t.clang-tidy-checks-list.rst %S/../../../docs/clang-tidy/checks/list.rst
 
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst | diff --strip-trailing-cr - %S/../../../docs/ReleaseNotes.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py -o %t.ReleaseNotes.rst
+// RUN: diff --strip-trailing-cr %t.ReleaseNotes.rst %S/../../../docs/ReleaseNotes.rst

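A side note on the patch-04 interface above: the single -o entry point routes to ReleaseNotes.rst whenever the output basename contains "release", and to the checks list otherwise. A minimal standalone sketch of that routing decision (the helper name and paths are made up for illustration; the string test mirrors main() in this patch):

import os

def pick_target(out_path: str, list_doc: str, rn_doc: str) -> str:
    # Mirrors main() in patch 04: "release" in the lowercased -o basename
    # selects ReleaseNotes.rst, anything else selects the checks list.
    return rn_doc if "release" in os.path.basename(out_path).lower() else list_doc

assert pick_target("/tmp/t.ReleaseNotes.rst", "list.rst", "ReleaseNotes.rst") == "ReleaseNotes.rst"
assert pick_target("/tmp/t.clang-tidy-checks-list.rst", "list.rst", "ReleaseNotes.rst") == "list.rst"
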
>From 555beb058a7ac8118c0603690a4570dc51b74733 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Mon, 3 Nov 2025 11:51:23 +0800
Subject: [PATCH 05/12] fix format

---
 .../clang-tidy/tool/clang-tidy-alphabetical-order-check.py    | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 58d93dcf31235..80e0450c5987c 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -219,7 +219,9 @@ def _normalize_release_notes_section(
     new_section: List[str] = []
     new_section.extend(prefix)
     for i_b, b in enumerate(sorted_blocks):
-        if i_b > 0 and (not new_section or (new_section and new_section[-1].strip() != "")):
+        if i_b > 0 and (
+            not new_section or (new_section and new_section[-1].strip() != "")
+        ):
             new_section.append("\n")
         new_section.extend(b)
     new_section.extend(suffix)

>From 66c831b396b102018c4dd875cc0074c51357c323 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Mon, 3 Nov 2025 14:18:51 +0800
Subject: [PATCH 06/12] fix csv-table related issues

---
 .../clang-tidy-alphabetical-order-check.py    | 50 +++++++++----------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 80e0450c5987c..5ebbbb75d92dc 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -59,32 +59,12 @@ def normalize_list_rst(lines: List[str]) -> str:
     """Return normalized content of checks list.rst from given lines.
 
     Input: full file content split into lines.
-    Output: single string with csv-table rows sorted by :doc: label while
-            preserving header/leading comments and trailing content.
+    Output: single string with all csv-table rows sorted by :doc: label while
+            preserving non-table content and table options/headers.
     """
     out: List[str] = []
     i = 0
     n = len(lines)
-    while i < n:
-        out.append(lines[i])
-        if lines[i].lstrip().startswith(".. csv-table::"):
-            i += 1
-            break
-        i += 1
-
-    while i < n and (lines[i].startswith(" ") or lines[i].strip() == ""):
-        if DOC_LINE_RE.match(lines[i]):
-            break
-        out.append(lines[i])
-        i += 1
-
-    entries: List[str] = []
-    while i < n and lines[i].startswith(" "):
-        if DOC_LINE_RE.match(lines[i]):
-            entries.append(lines[i])
-        else:
-            entries.append(lines[i])
-        i += 1
 
     def key_for(line: str):
         m = DOC_LINE_RE.match(line)
@@ -92,9 +72,29 @@ def key_for(line: str):
             return (1, "")
         return (0, m.group("label"))
 
-    entries_sorted = sorted(entries, key=key_for)
-    out.extend(entries_sorted)
-    out.extend(lines[i:])
+    while i < n:
+        line = lines[i]
+        if line.lstrip().startswith(".. csv-table::"):
+            out.append(line)
+            i += 1
+
+            while i < n and (lines[i].startswith(" ") or lines[i].strip() == ""):
+                if DOC_LINE_RE.match(lines[i]):
+                    break
+                out.append(lines[i])
+                i += 1
+
+            entries: List[str] = []
+            while i < n and lines[i].startswith(" "):
+                entries.append(lines[i])
+                i += 1
+
+            entries_sorted = sorted(entries, key=key_for)
+            out.extend(entries_sorted)
+            continue
+
+        out.append(line)
+        i += 1
 
     return "".join(out)
 

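To illustrate the row ordering that patch 06 now applies to every csv-table: rows matching the :doc: pattern sort by their label, while any other row keeps a later sort key. A small self-contained sketch using the same regex and key as the patch (the sample rows are just examples):

import re

DOC_LINE_RE = re.compile(r"^\s*:doc:`(?P<label>[^`<]+?)\s*<[^>]+>`.*$")

def key_for(line: str):
    m = DOC_LINE_RE.match(line)
    # Non-:doc: rows sort after :doc: rows; sorted() is stable among them.
    return (1, "") if not m else (0, m.group("label"))

rows = [
    '   :doc:`bugprone-use-after-move <bugprone/use-after-move>`, "Yes"\n',
    '   :doc:`abseil-string-find-startswith <abseil/string-find-startswith>`, "Yes"\n',
]
print("".join(sorted(rows, key=key_for)))  # abseil row first, then bugprone
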
>From 39c4b32b6d93339087d160600aee57ada9e904a3 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Tue, 4 Nov 2025 13:20:39 +0800
Subject: [PATCH 07/12] ~

---
 .../tool/clang-tidy-alphabetical-order-check.py    | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 5ebbbb75d92dc..21111d9cb91eb 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -10,10 +10,10 @@
 
 """
 
-ClangTidy Alphabetical Order Checker
-====================================
+Clang-Tidy Alphabetical Order Checker
+=====================================
 
-Normalize clang-tidy docs with deterministic sorting for linting/tests.
+Normalize Clang-Tidy documentation with deterministic sorting for linting/tests.
 
 Behavior:
 - Sort entries in docs/clang-tidy/checks/list.rst csv-table.
@@ -55,7 +55,7 @@ def write_text(path: str, content: str) -> None:
         f.write(content)
 
 
-def normalize_list_rst(lines: List[str]) -> str:
+def normalize_list_rst(lines: Sequence[str]) -> str:
     """Return normalized content of checks list.rst from given lines.
 
     Input: full file content split into lines.
@@ -205,7 +205,7 @@ def _find_section_bounds(
 
 
 def _normalize_release_notes_section(
-    lines: List[str], title: str, next_title: Optional[str]
+    lines: Sequence[str], title: str, next_title: Optional[str]
 ) -> List[str]:
     """Normalize a single release-notes section and return updated lines."""
     bounds = _find_section_bounds(lines, title, next_title)
@@ -226,10 +226,10 @@ def _normalize_release_notes_section(
         new_section.extend(b)
     new_section.extend(suffix)
 
-    return lines[:sec_start] + new_section + lines[sec_end:]
+    return list(lines[:sec_start]) + new_section + list(lines[sec_end:])
 
 
-def normalize_release_notes(lines: List[str]) -> str:
+def normalize_release_notes(lines: Sequence[str]) -> str:
     sections = ["New checks", "New check aliases", "Changes in existing 
checks"]
 
     out = list(lines)

>From 3883f1424756fa3e991680a8eb5f62eb1d95b299 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Tue, 4 Nov 2025 16:20:12 +0800
Subject: [PATCH 08/12] Add Duplicate check

---
 .../clang-tidy-alphabetical-order-check.py    | 116 ++++++++++++++++--
 1 file changed, 103 insertions(+), 13 deletions(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 21111d9cb91eb..4ff03b97b99e2 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -17,8 +17,8 @@
 
 Behavior:
 - Sort entries in docs/clang-tidy/checks/list.rst csv-table.
-- Sort key sections in docs/ReleaseNotes.rst. Does not remove duplicate
-  entries; developers should merge duplicates manually when needed.
+- Sort key sections in docs/ReleaseNotes.rst.
+- Detect duplicated entries in 'Changes in existing checks'.
 
 Flags:
   -o/--output  Write normalized content to this path instead of updating docs.
@@ -29,7 +29,7 @@
 import os
 import re
 import sys
-from typing import List, Optional, Sequence, Tuple
+from typing import Dict, List, Optional, Sequence, Tuple
 
 # Matches a :doc:`label <path>` or :doc:`label` reference anywhere in text and
 # captures the label. Used to sort bullet items alphabetically in ReleaseNotes
@@ -165,6 +165,56 @@ def sort_blocks(blocks: List[Tuple[str, List[str]]]) -> 
List[List[str]]:
     """
     return [b for _, b in sorted(blocks, key=lambda kb: kb[0])]
 
+def find_duplicate_block_details(
+    lines: Sequence[str], title: str
+) -> List[Tuple[str, List[Tuple[int, List[str]]]]]:
+    """Return detailed duplicate info as (key, [(start_idx, block_lines), 
...]).
+
+    start_idx is the 0-based index of the first line of the bullet block in
+    the original lines list. Only keys with more than one occurrence are
+    returned, and occurrences are listed in the order they appear.
+    """
+    bounds = _find_section_bounds(lines, title, None)
+    if bounds is None:
+        return []
+    _, sec_start, sec_end = bounds
+
+    i = sec_start
+    n = sec_end
+
+    while i < n and not is_bullet_start(lines[i]):
+        i += 1
+
+    blocks_with_pos: List[Tuple[str, int, List[str]]] = []
+    while i < n:
+        if not is_bullet_start(lines[i]):
+            break
+        bstart = i
+        i += 1
+        while i < n and not is_bullet_start(lines[i]):
+            if (
+                i + 1 < n
+                and set(lines[i + 1].rstrip("\n")) == {"^"}
+                and lines[i].strip()
+            ):
+                break
+            i += 1
+        block = list(lines[bstart:i])
+        key = extract_label(block[0])
+        blocks_with_pos.append((key, bstart, block))
+
+    grouped: Dict[str, List[Tuple[int, List[str]]]] = {}
+    for key, start, block in blocks_with_pos:
+        grouped.setdefault(key, []).append((start, block))
+
+    result: List[Tuple[str, List[Tuple[int, List[str]]]]] = []
+    for key, occs in grouped.items():
+        if len(occs) > 1:
+            result.append((key, occs))
+
+    result.sort(key=lambda kv: kv[0])
+    return result
+
 
 def _find_section_bounds(
     lines: Sequence[str], title: str, next_title: Optional[str]
@@ -210,7 +260,7 @@ def _normalize_release_notes_section(
     """Normalize a single release-notes section and return updated lines."""
     bounds = _find_section_bounds(lines, title, next_title)
     if bounds is None:
-        return lines
+        return list(lines)
     _, sec_start, sec_end = bounds
 
     prefix, blocks, suffix = collect_bullet_blocks(lines, sec_start, sec_end)
@@ -249,6 +299,47 @@ def _default_paths() -> Tuple[str, str]:
     return list_doc, rn_doc
 
 
+def _emit_duplicate_report(lines: Sequence[str], title: str) -> Optional[str]:
+    dups_detail = find_duplicate_block_details(lines, title)
+    if not dups_detail:
+        return None
+    out: List[str] = []
+    out.append(f"Error: Duplicate entries in '{title}':\n")
+    for key, occs in dups_detail:
+        out.append(f"\n-- Duplicate: {key}\n")
+        for start_idx, block in occs:
+            out.append(f"- At line {start_idx + 1}:\n")
+            out.append("".join(block))
+            if not (block and block[-1].endswith("\n")):
+                out.append("\n")
+    return "".join(out)
+
+
+def _handle_release_notes_out(out_path: str, rn_doc: str) -> int:
+    lines = read_text(rn_doc)
+    normalized = normalize_release_notes(lines)
+    write_text(out_path, normalized)
+
+    # Prefer reporting ordering issues first; let diff fail the test.
+    if "".join(lines) != normalized:
+        sys.stderr.write("Note: 'ReleaseNotes.rst' section is not normalized; 
Please fix ordering first.\n")
+        return 0
+
+    # Ordering is clean; now enforce the duplicate check.
+    report = _emit_duplicate_report(lines, "Changes in existing checks")
+    if report:
+        sys.stderr.write(report)
+        return 3
+    return 0
+
+
+def _handle_checks_list_out(out_path: str, list_doc: str) -> int:
+    lines = read_text(list_doc)
+    normalized = normalize_list_rst(lines)
+    write_text(out_path, normalized)
+    return 0
+
+
 def main(argv: List[str]) -> int:
     ap = argparse.ArgumentParser()
     ap.add_argument("-o", "--output", dest="out", default=None)
@@ -260,15 +351,9 @@ def main(argv: List[str]) -> int:
         out_path = args.out
         out_lower = os.path.basename(out_path).lower()
         if "release" in out_lower:
-            lines = read_text(rn_doc)
-            normalized = normalize_release_notes(lines)
-            write_text(out_path, normalized)
-            return 0
+            return _handle_release_notes_out(out_path, rn_doc)
         else:
-            lines = read_text(list_doc)
-            normalized = normalize_list_rst(lines)
-            write_text(out_path, normalized)
-            return 0
+            return _handle_checks_list_out(out_path, list_doc)
 
     list_lines = read_text(list_doc)
     rn_lines = read_text(rn_doc)
@@ -278,8 +363,13 @@ def main(argv: List[str]) -> int:
         write_text(list_doc, list_norm)
     if "".join(rn_lines) != rn_norm:
         write_text(rn_doc, rn_norm)
+
+    report = _emit_duplicate_report(rn_lines, "Changes in existing checks")
+    if report:
+        sys.stderr.write(report)
+        return 3
     return 0
 
 
 if __name__ == "__main__":
-    main(sys.argv[1:])
+    sys.exit(main(sys.argv[1:]))

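For the duplicate detection added in patch 08, the core idea is grouping bullet blocks by their :doc: label and reporting keys that occur more than once. A toy standalone sketch of that grouping (not the patch's exact code; the sample bullets are invented):

import re
from typing import Dict, List

DOC_LABEL_RE = re.compile(r":doc:`(?P<label>[^`<]+)\s*(?:<[^>]+>)?`")

bullets = [
    "- :doc:`Alpha <clang-tidy/checks/alpha>`: change one\n",
    "- :doc:`Zed <clang-tidy/checks/zed>`: some change\n",
    "- :doc:`Alpha <clang-tidy/checks/alpha>`: change two\n",
]

grouped: Dict[str, List[int]] = {}
for idx, line in enumerate(bullets):
    m = DOC_LABEL_RE.search(line)
    # The label group can keep a trailing space before '<', hence the strip().
    key = (m.group("label") if m else line).strip()
    grouped.setdefault(key, []).append(idx)

for key, occurrences in sorted(grouped.items()):
    if len(occurrences) > 1:
        print(f"Duplicate: {key} at bullets {occurrences}")
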
>From 081131fb2e5b69ec9787834bd0793eee8f218970 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Tue, 4 Nov 2025 16:29:38 +0800
Subject: [PATCH 09/12] fix format issue

---
 .../clang-tidy/tool/clang-tidy-alphabetical-order-check.py    | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 4ff03b97b99e2..c9175d512ef98 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -322,7 +322,9 @@ def _handle_release_notes_out(out_path: str, rn_doc: str) 
-> int:
 
     # Prefer reporting ordering issues first; let diff fail the test.
     if "".join(lines) != normalized:
-        sys.stderr.write("Note: 'ReleaseNotes.rst' section is not normalized; 
Please fix ordering first.\n")
+        sys.stderr.write(
+            "Note: 'ReleaseNotes.rst' section is not normalized; Please fix 
ordering first.\n"
+        )
         return 0
 
     # Ordering is clean then enforce duplicates.

>From 5fcca0272d97a68c47ca0714d6cc9306db8f9e81 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Tue, 4 Nov 2025 16:35:52 +0800
Subject: [PATCH 10/12] ~

---
 .../clang-tidy/tool/clang-tidy-alphabetical-order-check.py       | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index c9175d512ef98..433ed6da19f02 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -165,6 +165,7 @@ def sort_blocks(blocks: List[Tuple[str, List[str]]]) -> 
List[List[str]]:
     """
     return [b for _, b in sorted(blocks, key=lambda kb: kb[0])]
 
+
 def find_duplicate_block_details(
     lines: Sequence[str], title: str
 ) -> List[Tuple[str, List[Tuple[int, List[str]]]]]:

>From af4daa23c4c8106c9312dd86814f8f69ed38e09d Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Wed, 5 Nov 2025 15:45:37 +0800
Subject: [PATCH 11/12] Add a test file

---
 ...r-check.py => check-alphabetical-order.py} |   0
 .../tool/check-alphabetical-order_test.py     | 224 ++++++++++++++++++
 2 files changed, 224 insertions(+)
 rename 
clang-tools-extra/clang-tidy/tool/{clang-tidy-alphabetical-order-check.py => 
check-alphabetical-order.py} (100%)
 create mode 100644 
clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py

diff --git 
a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py 
b/clang-tools-extra/clang-tidy/tool/check-alphabetical-order.py
similarity index 100%
rename from 
clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
rename to clang-tools-extra/clang-tidy/tool/check-alphabetical-order.py
diff --git a/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py 
b/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py
new file mode 100644
index 0000000000000..21f8ce80806d9
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py
@@ -0,0 +1,224 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+# To run these tests:
+# python3 check-alphabetical-order_test.py -v
+
+import io
+import os
+import tempfile
+import unittest
+from contextlib import redirect_stderr
+import importlib.util
+import importlib.machinery
+from typing import Any, cast
+
+
+def _load_script_module():
+    here = os.path.dirname(cast(str, __file__))
+    script_path = os.path.normpath(
+        os.path.join(here, "check-alphabetical-order.py")
+    )
+    loader = importlib.machinery.SourceFileLoader(
+        "check_alphabetical_order", cast(str, script_path)
+    )
+    spec = importlib.util.spec_from_loader(loader.name, loader)
+    if spec is None or spec.loader is None:
+        raise ImportError(f"Failed to load spec for {script_path}")
+    mod = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(mod)
+    return mod
+
+
+_mod = cast(Any, _load_script_module())
+
+
+class TestAlphabeticalOrderCheck(unittest.TestCase):
+    def test_normalize_list_rst_sorts_rows(self):
+        lines = [
+            "Header\n",
+            "\n",
+            ".. csv-table:: Clang-Tidy checks\n",
+            '   :header: "Check", "Info"\n',
+            "\n",
+            "   :doc:`zebra <clang-tidy/checks/zebra>` Z line\n",
+            "   :doc:`Alpha <clang-tidy/checks/alpha>` A line\n",
+            "   some non-doc row that should stay after docs\n",
+            "\n",
+            "Footer\n",
+        ]
+
+        out = _mod.normalize_list_rst(lines)
+        # Alpha should appear before zebra in normalized csv-table rows.
+        alpha: str = "Alpha <clang-tidy/checks/alpha>"
+        zebra: str = "zebra <clang-tidy/checks/zebra>"
+        self.assertLess(out.find(alpha), out.find(zebra))
+        # Non-doc row should remain after doc rows within the table region.
+        self.assertGreater(out.find("some non-doc row"), out.find("zebra"))
+
+    def test_find_heading(self):
+        lines = [
+            "- something\n",
+            "New checks\n",
+            "^^^^^^^^^^^\n",
+            "- something\n",
+        ]
+        idx = _mod.find_heading(lines, "New checks")
+        self.assertEqual(idx, 1)
+
+    def test_collect_and_sort_blocks(self):
+        # Section content with two bullets and a suffix line.
+        lines = [
+            "Intro\n",
+            "- :doc:`Zed <clang-tidy/checks/zed>`: details\n",
+            "  continuation\n",
+            "- :doc:`alpha <clang-tidy/checks/alpha>`: more details\n",
+            "\n",
+        ]
+        prefix, blocks, suffix = _mod.collect_bullet_blocks(
+            lines, 0, len(lines)
+        )
+        # Prefix is the intro line until first bullet; suffix is trailing lines.
+        self.assertEqual(prefix, ["Intro\n"])
+        # Access suffix to satisfy static analyzers and assert it matches expectation.
+        self.assertEqual(suffix, [])
+        # Ensure sort by extracted label is deterministic and case-sensitive.
+        sorted_blocks = _mod.sort_blocks(blocks)
+        joined = "".join([l for b in sorted_blocks for l in b])
+        # Uppercase Z sorts before lowercase a in ASCII.
+        zed: str = "Zed <clang-tidy/checks/zed>"
+        aval: str = "alpha <clang-tidy/checks/alpha>"
+        self.assertLess(joined.find(zed), joined.find(aval))
+
+    def test_normalize_single_section_orders_bullets(self):
+        content = [
+            "New checks\n",
+            "^^^^^^^^^^^\n",
+            "- :doc:`zed <clang-tidy/checks/zed>`: new\n",
+            "- :doc:`Alpha <clang-tidy/checks/alpha>`: new\n",
+            "\n",
+        ]
+        out = "".join(
+            _mod._normalize_release_notes_section(content, "New checks", None)
+        )
+        # Uppercase A sorts before lowercase z in ASCII.
+        aitem: str = "- :doc:`Alpha <clang-tidy/checks/alpha>`: new"
+        zitem: str = "- :doc:`zed <clang-tidy/checks/zed>`: new"
+        self.assertLess(out.find(aitem), out.find(zitem))
+
+    def test_duplicate_detection_and_report(self):
+        lines = [
+            "Changes in existing checks\n",
+            "^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+            "- :doc:`Alpha <clang-tidy/checks/alpha>`: change one\n",
+            "- :doc:`Zed <clang-tidy/checks/zed>`: change something\n",
+            "- :doc:`Alpha <clang-tidy/checks/alpha>`: change two\n",
+            "\n",
+        ]
+        dups = _mod.find_duplicate_block_details(
+            lines, "Changes in existing checks"
+        )
+        # Expect one duplicate group for 'Alpha' with two occurrences.
+        self.assertEqual(len(dups), 1)
+        key, occs = dups[0]
+        self.assertEqual(key.strip(), "Alpha")
+        self.assertEqual(len(occs), 2)
+
+        report = _mod._emit_duplicate_report(
+            lines, "Changes in existing checks"
+        )
+        self.assertIsInstance(report, str)
+        self.assertIn(
+            "Duplicate entries in 'Changes in existing checks':", report
+        )
+        self.assertIn("-- Duplicate: Alpha", report)
+        self.assertEqual(report.count("- At line "), 2)
+
+    def test_handle_release_notes_out_unsorted_returns_ok(self):
+        # When content is not normalized, the function writes normalized text and returns 0.
+        rn_lines = [
+            "New checks\n",
+            "^^^^^^^^^^^\n",
+            "- :doc:`Zed <clang-tidy/checks/zed>`: new\n",
+            "- :doc:`Alpha <clang-tidy/checks/alpha>`: new\n",
+            "\n",
+        ]
+        with tempfile.TemporaryDirectory() as td:
+            rn_doc = os.path.join(td, "ReleaseNotes.rst")
+            out_path = os.path.join(td, "out.rst")
+            with open(rn_doc, "w", encoding="utf-8") as f:
+                f.write("".join(rn_lines))
+
+            buf = io.StringIO()
+            with redirect_stderr(buf):
+                rc = _mod._handle_release_notes_out(out_path, rn_doc)
+
+            self.assertEqual(rc, 0)
+            with open(out_path, "r", encoding="utf-8") as f:
+                out = f.read()
+
+            self.assertLess(
+                out.find("Alpha <clang-tidy/checks/alpha>"),
+                out.find("Zed <clang-tidy/checks/zed>"),
+            )
+            self.assertIn("not normalized", buf.getvalue())
+
+    def test_handle_release_notes_out_duplicates_fail(self):
+        # Sorting is already correct but duplicates exist; should return 3 and report.
+        rn_lines = [
+            "Changes in existing checks\n",
+            "^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+            "- :doc:`Alpha <clang-tidy/checks/alpha>`: change one\n",
+            "  change one\n\n",
+            "- :doc:`Alpha <clang-tidy/checks/alpha>`: change two\n\n",
+            "  change two\n\n",
+        ]
+        with tempfile.TemporaryDirectory() as td:
+            rn_doc = os.path.join(td, "ReleaseNotes.rst")
+            out_path = os.path.join(td, "out.rst")
+            with open(rn_doc, "w", encoding="utf-8") as f:
+                f.write("".join(rn_lines))
+
+            buf = io.StringIO()
+            with redirect_stderr(buf):
+                rc = _mod._handle_release_notes_out(out_path, rn_doc)
+
+            self.assertEqual(rc, 3)
+            self.assertIn(
+                "Duplicate entries in 'Changes in existing checks':",
+                buf.getvalue(),
+            )
+
+    def test_handle_checks_list_out_writes_normalized(self):
+        list_lines = [
+            ".. csv-table:: List\n",
+            '   :header: "Check", "Info"\n',
+            "\n",
+            "   :doc:`Zed <clang-tidy/checks/zed>` foo\n",
+            "   :doc:`Beta <clang-tidy/checks/beta>` bar\n",
+            "   :doc:`Alpha <clang-tidy/checks/alpha>` baz\n",
+            "   :doc:`Baz <clang-tidy/checks/baz>` baz\n",
+        ]
+        with tempfile.TemporaryDirectory() as td:
+            in_doc = os.path.join(td, "list.rst")
+            out_doc = os.path.join(td, "out.rst")
+            with open(in_doc, "w", encoding="utf-8") as f:
+                f.write("".join(list_lines))
+            rc = _mod._handle_checks_list_out(out_doc, in_doc)
+            self.assertEqual(rc, 0)
+            with open(out_doc, "r", encoding="utf-8") as f:
+                out = f.read()
+            alpha = out.find("Alpha <clang-tidy/checks/alpha>")
+            baz = out.find("Baz <clang-tidy/checks/baz>")
+            beta = out.find("Beta <clang-tidy/checks/beta>")
+            zed = out.find("Zed <clang-tidy/checks/zed>")
+            for pos in (alpha, baz, beta, zed):
+                self.assertGreaterEqual(pos, 0)
+            self.assertLess(alpha, baz)
+            self.assertLess(baz, beta)
+            self.assertLess(beta, zed)
+
+
+if __name__ == "__main__":
+    unittest.main()

>From d14d899a26fff0c1b2266260734011a5d225a2dc Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Wed, 5 Nov 2025 16:01:02 +0800
Subject: [PATCH 12/12] ~

---
 .../tool/check-alphabetical-order_test.py     | 22 +++++--------------
 1 file changed, 5 insertions(+), 17 deletions(-)

diff --git a/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py 
b/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py
index 21f8ce80806d9..908421bc7488d 100644
--- a/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py
+++ b/clang-tools-extra/clang-tidy/tool/check-alphabetical-order_test.py
@@ -17,9 +17,7 @@
 
 def _load_script_module():
     here = os.path.dirname(cast(str, __file__))
-    script_path = os.path.normpath(
-        os.path.join(here, "check-alphabetical-order.py")
-    )
+    script_path = os.path.normpath(os.path.join(here, "check-alphabetical-order.py"))
     loader = importlib.machinery.SourceFileLoader(
         "check_alphabetical_order", cast(str, script_path)
     )
@@ -76,14 +74,10 @@ def test_collect_and_sort_blocks(self):
             "- :doc:`alpha <clang-tidy/checks/alpha>`: more details\n",
             "\n",
         ]
-        prefix, blocks, suffix = _mod.collect_bullet_blocks(
-            lines, 0, len(lines)
-        )
+        prefix, blocks, suffix = _mod.collect_bullet_blocks(lines, 0, len(lines))
         # Prefix is the intro line until first bullet; suffix is trailing lines.
         self.assertEqual(prefix, ["Intro\n"])
-        # Access suffix to satisfy static analyzers and assert it matches expectation.
         self.assertEqual(suffix, [])
-        # Ensure sort by extracted label is deterministic and case-sensitive.
         sorted_blocks = _mod.sort_blocks(blocks)
         joined = "".join([l for b in sorted_blocks for l in b])
         # Uppercase Z sorts before lowercase a in ASCII.
@@ -116,22 +110,16 @@ def test_duplicate_detection_and_report(self):
             "- :doc:`Alpha <clang-tidy/checks/alpha>`: change two\n",
             "\n",
         ]
-        dups = _mod.find_duplicate_block_details(
-            lines, "Changes in existing checks"
-        )
+        dups = _mod.find_duplicate_block_details(lines, "Changes in existing checks")
         # Expect one duplicate group for 'Alpha' with two occurrences.
         self.assertEqual(len(dups), 1)
         key, occs = dups[0]
         self.assertEqual(key.strip(), "Alpha")
         self.assertEqual(len(occs), 2)
 
-        report = _mod._emit_duplicate_report(
-            lines, "Changes in existing checks"
-        )
+        report = _mod._emit_duplicate_report(lines, "Changes in existing checks")
         self.assertIsInstance(report, str)
-        self.assertIn(
-            "Duplicate entries in 'Changes in existing checks':", report
-        )
+        self.assertIn("Duplicate entries in 'Changes in existing checks':", 
report)
         self.assertIn("-- Duplicate: Alpha", report)
         self.assertEqual(report.count("- At line "), 2)
 

