The JDK wants to know the CPU info and hardware capabilities on
aarch64. Previously we didn't support READ_SPECIALREG in userland.
We also gained elf_aux_info(3) in recent releases for querying
hardware capabilities. This diff adds support for CPU info and
hardware capabilities, and fixes a few regression tests that have
been deadlocking on aarch64.
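
For reference, the core of the approach is elf_aux_info(3); below is a
minimal standalone sketch (not part of the diff, error handling kept to
the bare minimum for illustration):

    #include <sys/auxv.h>   /* elf_aux_info(), AT_HWCAP */
    #include <stdio.h>

    int
    main(void)
    {
            unsigned long hwcap = 0;

            /* Ask the kernel for the hardware capability bits. */
            if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) != 0)
                    return 1;
            printf("AT_HWCAP = %#lx\n", hwcap);
            return 0;
    }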

So far this has only been tested on an Apple Mac Mini M2 Pro. I will
try my rockpro64 next as well.

okay?

Index: 1.8/Makefile
===================================================================
RCS file: /cvs/ports/devel/jdk/1.8/Makefile,v
diff -u -p -u -r1.100 Makefile
--- 1.8/Makefile        3 Nov 2025 13:37:17 -0000       1.100
+++ 1.8/Makefile        13 Dec 2025 23:35:00 -0000
@@ -12,6 +12,7 @@ V=            ${BASE_VER}.${UPDATE_VER}.${BUILD_VE
 PKGNAME=       jdk-${V}
 PKGSTEM=       jdk-${BASE_VER}
 EPOCH=         0
+REVISION=      0
 
 DIST_SUBDIR=   jdk
 DISTNAME=      jdk8u${UPDATE_VER}-${BUILD_VER}.${BSD_PORT_REL}
Index: 1.8/patches/patch-hotspot_src_os_cpu_bsd_aarch64_vm_vm_version_bsd_aarch64_cpps
===================================================================
RCS file: 1.8/patches/patch-hotspot_src_os_cpu_bsd_aarch64_vm_vm_version_bsd_aarch64_cpps
diff -N 1.8/patches/patch-hotspot_src_os_cpu_bsd_aarch64_vm_vm_version_bsd_aarch64_cpps
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 1.8/patches/patch-hotspot_src_os_cpu_bsd_aarch64_vm_vm_version_bsd_aarch64_cpps	13 Dec 2025 23:35:00 -0000
@@ -0,0 +1,425 @@
+Detect CPU, model, variant and revision.
+Get hardware capabilities using elf_aux_info(3).
+
+Index: hotspot/src/os_cpu/bsd_aarch64/vm/vm_version_bsd_aarch64.cpp
+--- hotspot/src/os_cpu/bsd_aarch64/vm/vm_version_bsd_aarch64.cpp.orig
++++ hotspot/src/os_cpu/bsd_aarch64/vm/vm_version_bsd_aarch64.cpp
+@@ -27,91 +27,70 @@
+ #include "vm_version_aarch64.hpp"
+ 
+ #include <machine/armreg.h>
+-#if defined (__FreeBSD__)
+-#include <machine/elf.h>
++#if defined (__FreeBSD__) || defined (__OpenBSD__)
++#include <sys/auxv.h>
+ #endif
+ 
+-#ifndef HWCAP_ASIMD
+-#define HWCAP_ASIMD (1<<1)
+-#endif
+-
+-#ifndef HWCAP_AES
+-#define HWCAP_AES   (1<<3)
+-#endif
+-
+-#ifndef HWCAP_PMULL
+-#define HWCAP_PMULL (1<<4)
+-#endif
+-
+-#ifndef HWCAP_SHA1
+-#define HWCAP_SHA1  (1<<5)
+-#endif
+-
+-#ifndef HWCAP_SHA2
+-#define HWCAP_SHA2  (1<<6)
+-#endif
+-
+-#ifndef HWCAP_CRC32
+-#define HWCAP_CRC32 (1<<7)
+-#endif
+-
+-#ifndef HWCAP_ATOMICS
+-#define HWCAP_ATOMICS (1<<8)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_SHIFT
+-#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD
+-#define ID_AA64PFR0_AdvSIMD(x) ((x) & (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT))
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_IMPL
+-#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_HP
+-#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64ISAR0_AES_VAL
+-#define ID_AA64ISAR0_AES_VAL ID_AA64ISAR0_AES
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA1_VAL
+-#define ID_AA64ISAR0_SHA1_VAL ID_AA64ISAR0_SHA1
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA2_VAL
+-#define ID_AA64ISAR0_SHA2_VAL ID_AA64ISAR0_SHA2
+-#endif
+-
+-#ifndef ID_AA64ISAR0_CRC32_VAL
+-#define ID_AA64ISAR0_CRC32_VAL ID_AA64ISAR0_CRC32
+-#endif
+-
+ #define       CPU_IMPL_ARM            0x41
+ #define       CPU_IMPL_BROADCOM       0x42
+ #define       CPU_IMPL_CAVIUM         0x43
+ #define       CPU_IMPL_DEC            0x44
++#define       CPU_IMPL_FUJITSU        0x46
++#define       CPU_IMPL_HISILICON      0x48
+ #define       CPU_IMPL_INFINEON       0x49
+ #define       CPU_IMPL_FREESCALE      0x4D
+ #define       CPU_IMPL_NVIDIA         0x4E
+ #define       CPU_IMPL_APM            0x50
+ #define       CPU_IMPL_QUALCOMM       0x51
+ #define       CPU_IMPL_MARVELL        0x56
++#define       CPU_IMPL_APPLE          0x61
+ #define       CPU_IMPL_INTEL          0x69
++#define       CPU_IMPL_AMPERE         0xC0
++#define       CPU_IMPL_MICROSOFT      0x6D
+ 
+ /* ARM Part numbers */
+ #define       CPU_PART_FOUNDATION     0xD00
+-#define       CPU_PART_CORTEX_A35     0xD04
++#define       CPU_PART_CORTEX_A34     0xD02
+ #define       CPU_PART_CORTEX_A53     0xD03
++#define       CPU_PART_CORTEX_A35     0xD04
+ #define       CPU_PART_CORTEX_A55     0xD05
++#define       CPU_PART_CORTEX_A65     0xD06
+ #define       CPU_PART_CORTEX_A57     0xD07
+ #define       CPU_PART_CORTEX_A72     0xD08
+ #define       CPU_PART_CORTEX_A73     0xD09
+ #define       CPU_PART_CORTEX_A75     0xD0A
++#define       CPU_PART_CORTEX_A76     0xD0B
++#define       CPU_PART_NEOVERSE_N1    0xD0C
++#define       CPU_PART_CORTEX_A77     0xD0D
++#define       CPU_PART_CORTEX_A76AE   0xD0E
++#define       CPU_PART_AEM_V8         0xD0F
++#define       CPU_PART_NEOVERSE_V1    0xD40
++#define       CPU_PART_CORTEX_A78     0xD41
++#define       CPU_PART_CORTEX_A78AE   0xD42
++#define       CPU_PART_CORTEX_A65AE   0xD43
++#define       CPU_PART_CORTEX_X1      0xD44
++#define       CPU_PART_CORTEX_A510    0xD46
++#define       CPU_PART_CORTEX_A710    0xD47
++#define       CPU_PART_CORTEX_X2      0xD48
++#define       CPU_PART_NEOVERSE_N2    0xD49
++#define       CPU_PART_NEOVERSE_E1    0xD4A
++#define       CPU_PART_CORTEX_A78C    0xD4B
++#define       CPU_PART_CORTEX_X1C     0xD4C
++#define       CPU_PART_CORTEX_A715    0xD4D
++#define       CPU_PART_CORTEX_X3      0xD4E
++#define       CPU_PART_NEOVERSE_V2    0xD4F
++#define       CPU_PART_CORTEX_A520    0xD80
++#define       CPU_PART_CORTEX_A720    0xD81
++#define       CPU_PART_CORTEX_X4      0xD82
++#define       CPU_PART_NEOVERSE_V3AE  0xD83
++#define       CPU_PART_NEOVERSE_V3    0xD84
++#define       CPU_PART_CORTEX_X925    0xD85
++#define       CPU_PART_CORTEX_A725    0xD87
++#define       CPU_PART_C1_NANO        0xD8A
++#define       CPU_PART_C1_PRO         0xD8B
++#define       CPU_PART_C1_ULTRA       0xD8C
++#define       CPU_PART_NEOVERSE_N3    0xD8E
++#define       CPU_PART_C1_PREMIUM     0xD90
+ 
+ /* Cavium Part numbers */
+ #define       CPU_PART_THUNDERX       0x0A1
+@@ -124,21 +103,40 @@
+ 
+ #define       CPU_REV_THUNDERX2_0     0x00
+ 
++/* APM (now Ampere) Part number */
++#define CPU_PART_EMAG8180     0x000
++
++/* Ampere Part numbers */
++#define       CPU_PART_AMPERE1        0xAC3
++#define       CPU_PART_AMPERE1A       0xAC4
++
++/* Microsoft Part numbers */
++#define       CPU_PART_AZURE_COBALT_100       0xD49
++
++/* Qualcomm */
++#define       CPU_PART_KRYO400_GOLD   0x804
++#define       CPU_PART_KRYO400_SILVER 0x805
++
++/* Apple part numbers */
++#define CPU_PART_M1_ICESTORM      0x022
++#define CPU_PART_M1_FIRESTORM     0x023
++#define CPU_PART_M1_ICESTORM_PRO  0x024
++#define CPU_PART_M1_FIRESTORM_PRO 0x025
++#define CPU_PART_M1_ICESTORM_MAX  0x028
++#define CPU_PART_M1_FIRESTORM_MAX 0x029
++#define CPU_PART_M2_BLIZZARD      0x032
++#define CPU_PART_M2_AVALANCHE     0x033
++#define CPU_PART_M2_BLIZZARD_PRO  0x034
++#define CPU_PART_M2_AVALANCHE_PRO 0x035
++#define CPU_PART_M2_BLIZZARD_MAX  0x038
++#define CPU_PART_M2_AVALANCHE_MAX 0x039
++
+ #define       CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
+ #define       CPU_PART(midr)  (((midr) >> 4) & 0xfff)
+ #define       CPU_VAR(midr)   (((midr) >> 20) & 0xf)
+ #define       CPU_REV(midr)   (((midr) >> 0) & 0xf)
+ #define UL(x)   UINT64_C(x)
+ 
+-struct cpu_desc {
+-      u_int           cpu_impl;
+-      u_int           cpu_part_num;
+-      u_int           cpu_variant;
+-      u_int           cpu_revision;
+-      const char      *cpu_impl_name;
+-      const char      *cpu_part_name;
+-};
+-
+ struct cpu_parts {
+       u_int           part_id;
+       const char      *part_name;
+@@ -161,16 +159,51 @@ struct cpu_implementers {
+  */
+ /* ARM Ltd. */
+ static const struct cpu_parts cpu_parts_arm[] = {
++      { CPU_PART_AEM_V8, "AEMv8" },
+       { CPU_PART_FOUNDATION, "Foundation-Model" },
++      { CPU_PART_CORTEX_A34, "Cortex-A34" },
+       { CPU_PART_CORTEX_A35, "Cortex-A35" },
+       { CPU_PART_CORTEX_A53, "Cortex-A53" },
+       { CPU_PART_CORTEX_A55, "Cortex-A55" },
+       { CPU_PART_CORTEX_A57, "Cortex-A57" },
++      { CPU_PART_CORTEX_A65, "Cortex-A65" },
++      { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" },
+       { CPU_PART_CORTEX_A72, "Cortex-A72" },
+       { CPU_PART_CORTEX_A73, "Cortex-A73" },
+       { CPU_PART_CORTEX_A75, "Cortex-A75" },
++      { CPU_PART_CORTEX_A76, "Cortex-A76" },
++      { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
++      { CPU_PART_CORTEX_A77, "Cortex-A77" },
++      { CPU_PART_CORTEX_A78, "Cortex-A78" },
++      { CPU_PART_CORTEX_A78AE, "Cortex-A78AE" },
++      { CPU_PART_CORTEX_A78C, "Cortex-A78C" },
++      { CPU_PART_CORTEX_A510, "Cortex-A510" },
++      { CPU_PART_CORTEX_A520, "Cortex-A520" },
++      { CPU_PART_CORTEX_A710, "Cortex-A710" },
++      { CPU_PART_CORTEX_A715, "Cortex-A715" },
++      { CPU_PART_CORTEX_A720, "Cortex-A720" },
++      { CPU_PART_CORTEX_A725, "Cortex-A725" },
++	{ CPU_PART_CORTEX_X925, "Cortex-X925" },
++      { CPU_PART_CORTEX_X1, "Cortex-X1" },
++      { CPU_PART_CORTEX_X1C, "Cortex-X1C" },
++      { CPU_PART_CORTEX_X2, "Cortex-X2" },
++      { CPU_PART_CORTEX_X3, "Cortex-X3" },
++      { CPU_PART_CORTEX_X4, "Cortex-X4" },
++      { CPU_PART_C1_NANO, "C1-Nano" },
++      { CPU_PART_C1_PRO, "C1-Pro" },
++      { CPU_PART_C1_PREMIUM, "C1-Premium" },
++      { CPU_PART_C1_ULTRA, "C1-Ultra" },
++      { CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
++      { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
++      { CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
++      { CPU_PART_NEOVERSE_N3, "Neoverse-N3" },
++      { CPU_PART_NEOVERSE_V1, "Neoverse-V1" },
++      { CPU_PART_NEOVERSE_V2, "Neoverse-V2" },
++      { CPU_PART_NEOVERSE_V3, "Neoverse-V3" },
++      { CPU_PART_NEOVERSE_V3AE, "Neoverse-V3AE" },
+       CPU_PART_NONE,
+ };
++
+ /* Cavium */
+ static const struct cpu_parts cpu_parts_cavium[] = {
+       { CPU_PART_THUNDERX, "ThunderX" },
+@@ -178,6 +211,49 @@ static const struct cpu_parts cpu_parts_cavium[] = {
+       CPU_PART_NONE,
+ };
+ 
++/* APM (now Ampere) */
++static const struct cpu_parts cpu_parts_apm[] = {
++      { CPU_PART_EMAG8180, "eMAG 8180" },
++      CPU_PART_NONE,
++};
++
++/* Ampere */
++static const struct cpu_parts cpu_parts_ampere[] = {
++      { CPU_PART_AMPERE1, "AmpereOne AC03" },
++      { CPU_PART_AMPERE1A, "AmpereOne AC04" },
++      CPU_PART_NONE,
++};
++
++/* Microsoft */
++static const struct cpu_parts cpu_parts_microsoft[] = {
++      { CPU_PART_AZURE_COBALT_100, "Azure Cobalt 100" },
++      CPU_PART_NONE,
++};
++
++/* Qualcomm */
++static const struct cpu_parts cpu_parts_qcom[] = {
++      { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" },
++      { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" },
++      CPU_PART_NONE,
++};
++
++/* Apple */
++static const struct cpu_parts cpu_parts_apple[] = {
++      { CPU_PART_M1_ICESTORM, "M1 Icestorm" },
++      { CPU_PART_M1_FIRESTORM, "M1 Firestorm" },
++      { CPU_PART_M1_ICESTORM_PRO, "M1 Pro Icestorm" },
++      { CPU_PART_M1_FIRESTORM_PRO, "M1 Pro Firestorm" },
++      { CPU_PART_M1_ICESTORM_MAX, "M1 Max Icestorm" },
++      { CPU_PART_M1_FIRESTORM_MAX, "M1 Max Firestorm" },
++      { CPU_PART_M2_BLIZZARD, "M2 Blizzard" },
++      { CPU_PART_M2_AVALANCHE, "M2 Avalanche" },
++      { CPU_PART_M2_BLIZZARD_PRO, "M2 Pro Blizzard" },
++      { CPU_PART_M2_AVALANCHE_PRO, "M2 Pro Avalanche" },
++      { CPU_PART_M2_BLIZZARD_MAX, "M2 Max Blizzard" },
++      { CPU_PART_M2_AVALANCHE_MAX, "M2 Max Avalanche" },
++      CPU_PART_NONE,
++};
++
+ /* Unknown */
+ static const struct cpu_parts cpu_parts_none[] = {
+       CPU_PART_NONE,
+@@ -187,41 +263,48 @@ static const struct cpu_parts cpu_parts_none[] = {
+  * Implementers table.
+  */
+ const struct cpu_implementers cpu_implementers[] = {
++      { CPU_IMPL_AMPERE,      "Ampere",       cpu_parts_ampere },
++      { CPU_IMPL_APPLE,       "Apple",        cpu_parts_apple },
++      { CPU_IMPL_APM,         "APM",          cpu_parts_apm },
+       { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
+       { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
+       { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
+       { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
+-      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
+-      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
+-      { CPU_IMPL_APM,         "APM",          cpu_parts_none },
+-      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
+-      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_FUJITSU,     "Fujitsu",      cpu_parts_none },
++      { CPU_IMPL_HISILICON,   "HiSilicon",    cpu_parts_none },
++      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
++      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_MICROSOFT,   "Microsoft",    cpu_parts_microsoft },
++      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
++      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_qcom },
+       CPU_IMPLEMENTER_NONE,
+ };
+-
+-#ifdef __OpenBSD__
+-// READ_SPECIALREG is not available from userland on OpenBSD.
+-// Hardcode these values to the "lowest common denominator"
++ 
+ unsigned long VM_Version::os_get_processor_features() {
+-  _cpu = CPU_IMPL_ARM;
+-  _model = CPU_PART_CORTEX_A53;
+-  _variant = 0;
+-  _revision = 0;
+-  return HWCAP_ASIMD;
+-}
+-#else
+-unsigned long VM_Version::os_get_processor_features() {
+-  struct cpu_desc cpu_desc[1];
+-  struct cpu_desc user_cpu_desc;
+   unsigned long auxv = 0;
+-  uint64_t id_aa64isar0, id_aa64pfr0;
++  elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv));
+ 
++  STATIC_ASSERT(CPU_ASIMD   == HWCAP_ASIMD);
++  STATIC_ASSERT(CPU_AES     == HWCAP_AES);
++  STATIC_ASSERT(CPU_PMULL   == HWCAP_PMULL);
++  STATIC_ASSERT(CPU_SHA1    == HWCAP_SHA1);
++  STATIC_ASSERT(CPU_SHA2    == HWCAP_SHA2);
++  STATIC_ASSERT(CPU_CRC32   == HWCAP_CRC32);
++  STATIC_ASSERT(CPU_LSE     == HWCAP_ATOMICS);
++  auxv &=(
++      HWCAP_ASIMD   |
++      HWCAP_AES     |
++      HWCAP_PMULL   |
++      HWCAP_SHA1    |
++      HWCAP_SHA2    |
++      HWCAP_CRC32   |
++      HWCAP_ATOMICS);
++
+   uint32_t midr;
+   uint32_t impl_id;
+   uint32_t part_id;
+-  uint32_t cpu = 0;
+   size_t i;
+   const struct cpu_parts *cpu_partsp = NULL;
+ 
+@@ -231,8 +314,7 @@ unsigned long VM_Version::os_get_processor_features() 
+   for (i = 0; i < nitems(cpu_implementers); i++) {
+     if (impl_id == cpu_implementers[i].impl_id ||
+       cpu_implementers[i].impl_id == 0) {
+-      cpu_desc[cpu].cpu_impl = impl_id;
+-      cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
++      _cpu = impl_id;
+       cpu_partsp = cpu_implementers[i].cpu_parts;
+       break;
+     }
+@@ -241,48 +323,13 @@ unsigned long VM_Version::os_get_processor_features() 
+   part_id = CPU_PART(midr);
+   for (i = 0; &cpu_partsp[i] != NULL; i++) {
+     if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) {
+-      cpu_desc[cpu].cpu_part_num = part_id;
+-      cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
++      _model = part_id;
+       break;
+     }
+   }
+ 
+-  cpu_desc[cpu].cpu_revision = CPU_REV(midr);
+-  cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
++  _revision = CPU_REV(midr);
++  _variant = CPU_VAR(midr);
+ 
+-  _cpu = cpu_desc[cpu].cpu_impl;
+-  _variant = cpu_desc[cpu].cpu_variant;
+-  _model = cpu_desc[cpu].cpu_part_num;
+-  _revision = cpu_desc[cpu].cpu_revision;
+-
+-  id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+-  id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
+-
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_BASE) {
+-    auxv = auxv | HWCAP_AES;
+-  }
+-
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_PMULL) {
+-    auxv = auxv | HWCAP_PMULL;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA1_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE) {
+-    auxv = auxv | HWCAP_SHA1;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA2_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA2_BASE) {
+-    auxv = auxv | HWCAP_SHA2;
+-  }
+-
+-  if (ID_AA64ISAR0_CRC32_VAL(id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE) {
+-    auxv = auxv | HWCAP_CRC32;
+-  }
+-
+-  if (ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_IMPL || \
+-      ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_HP ) {
+-    auxv = auxv | HWCAP_ASIMD;
+-  }
+-
+   return auxv;
+ }
+-#endif
Index: 11/Makefile
===================================================================
RCS file: /cvs/ports/devel/jdk/11/Makefile,v
diff -u -p -u -r1.66 Makefile
--- 11/Makefile 13 Nov 2025 21:21:58 -0000      1.66
+++ 11/Makefile 13 Dec 2025 23:35:00 -0000
@@ -12,6 +12,7 @@ PACKAGE_VER=  ${BASE_VER}.${PATCH_VER}.${
 PKGNAME=       jdk-${PACKAGE_VER}
 PKGSTEM=       jdk-11
 EPOCH=         0
+REVISION=      0
 
 DIST_SUBDIR=   jdk
 DISTNAME=      jdk-${VERSION_STR}
Index: 11/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
===================================================================
RCS file: 11/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
diff -N 11/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 11/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp	13 Dec 2025 23:35:00 -0000
@@ -0,0 +1,453 @@
+Detect CPU, model, variant and revision.
+Get hardware capabilities using elf_aux_info(3).
+
+Index: src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+--- src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp.orig
++++ src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+@@ -92,91 +92,70 @@ bool VM_Version::is_cpu_emulated() {
+ #include "vm_version_aarch64.hpp"
+ 
+ #include <machine/armreg.h>
+-#if defined (__FreeBSD__)
+-#include <machine/elf.h>
++#if defined (__FreeBSD__) || defined (__OpenBSD__)
++#include <sys/auxv.h>
+ #endif
+ 
+-#ifndef HWCAP_ASIMD
+-#define HWCAP_ASIMD (1<<1)
+-#endif
+-
+-#ifndef HWCAP_AES
+-#define HWCAP_AES   (1<<3)
+-#endif
+-
+-#ifndef HWCAP_PMULL
+-#define HWCAP_PMULL (1<<4)
+-#endif
+-
+-#ifndef HWCAP_SHA1
+-#define HWCAP_SHA1  (1<<5)
+-#endif
+-
+-#ifndef HWCAP_SHA2
+-#define HWCAP_SHA2  (1<<6)
+-#endif
+-
+-#ifndef HWCAP_CRC32
+-#define HWCAP_CRC32 (1<<7)
+-#endif
+-
+-#ifndef HWCAP_ATOMICS
+-#define HWCAP_ATOMICS (1<<8)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_SHIFT
+-#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD
+-#define ID_AA64PFR0_AdvSIMD(x) ((x) & (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT))
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_IMPL
+-#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_HP
+-#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64ISAR0_AES_VAL
+-#define ID_AA64ISAR0_AES_VAL ID_AA64ISAR0_AES
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA1_VAL
+-#define ID_AA64ISAR0_SHA1_VAL ID_AA64ISAR0_SHA1
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA2_VAL
+-#define ID_AA64ISAR0_SHA2_VAL ID_AA64ISAR0_SHA2
+-#endif
+-
+-#ifndef ID_AA64ISAR0_CRC32_VAL
+-#define ID_AA64ISAR0_CRC32_VAL ID_AA64ISAR0_CRC32
+-#endif
+-
+ #define       CPU_IMPL_ARM            0x41
+ #define       CPU_IMPL_BROADCOM       0x42
+ #define       CPU_IMPL_CAVIUM         0x43
+ #define       CPU_IMPL_DEC            0x44
++#define       CPU_IMPL_FUJITSU        0x46
++#define       CPU_IMPL_HISILICON      0x48
+ #define       CPU_IMPL_INFINEON       0x49
+ #define       CPU_IMPL_FREESCALE      0x4D
+ #define       CPU_IMPL_NVIDIA         0x4E
+ #define       CPU_IMPL_APM            0x50
+ #define       CPU_IMPL_QUALCOMM       0x51
+ #define       CPU_IMPL_MARVELL        0x56
++#define       CPU_IMPL_APPLE          0x61
+ #define       CPU_IMPL_INTEL          0x69
++#define       CPU_IMPL_AMPERE         0xC0
++#define       CPU_IMPL_MICROSOFT      0x6D
+ 
+ /* ARM Part numbers */
+ #define       CPU_PART_FOUNDATION     0xD00
+-#define       CPU_PART_CORTEX_A35     0xD04
++#define       CPU_PART_CORTEX_A34     0xD02
+ #define       CPU_PART_CORTEX_A53     0xD03
++#define       CPU_PART_CORTEX_A35     0xD04
+ #define       CPU_PART_CORTEX_A55     0xD05
++#define       CPU_PART_CORTEX_A65     0xD06
+ #define       CPU_PART_CORTEX_A57     0xD07
+ #define       CPU_PART_CORTEX_A72     0xD08
+ #define       CPU_PART_CORTEX_A73     0xD09
+ #define       CPU_PART_CORTEX_A75     0xD0A
++#define       CPU_PART_CORTEX_A76     0xD0B
++#define       CPU_PART_NEOVERSE_N1    0xD0C
++#define       CPU_PART_CORTEX_A77     0xD0D
++#define       CPU_PART_CORTEX_A76AE   0xD0E
++#define       CPU_PART_AEM_V8         0xD0F
++#define       CPU_PART_NEOVERSE_V1    0xD40
++#define       CPU_PART_CORTEX_A78     0xD41
++#define       CPU_PART_CORTEX_A78AE   0xD42
++#define       CPU_PART_CORTEX_A65AE   0xD43
++#define       CPU_PART_CORTEX_X1      0xD44
++#define       CPU_PART_CORTEX_A510    0xD46
++#define       CPU_PART_CORTEX_A710    0xD47
++#define       CPU_PART_CORTEX_X2      0xD48
++#define       CPU_PART_NEOVERSE_N2    0xD49
++#define       CPU_PART_NEOVERSE_E1    0xD4A
++#define       CPU_PART_CORTEX_A78C    0xD4B
++#define       CPU_PART_CORTEX_X1C     0xD4C
++#define       CPU_PART_CORTEX_A715    0xD4D
++#define       CPU_PART_CORTEX_X3      0xD4E
++#define       CPU_PART_NEOVERSE_V2    0xD4F
++#define       CPU_PART_CORTEX_A520    0xD80
++#define       CPU_PART_CORTEX_A720    0xD81
++#define       CPU_PART_CORTEX_X4      0xD82
++#define       CPU_PART_NEOVERSE_V3AE  0xD83
++#define       CPU_PART_NEOVERSE_V3    0xD84
++#define       CPU_PART_CORTEX_X925    0xD85
++#define       CPU_PART_CORTEX_A725    0xD87
++#define       CPU_PART_C1_NANO        0xD8A
++#define       CPU_PART_C1_PRO         0xD8B
++#define       CPU_PART_C1_ULTRA       0xD8C
++#define       CPU_PART_NEOVERSE_N3    0xD8E
++#define       CPU_PART_C1_PREMIUM     0xD90
+ 
+ /* Cavium Part numbers */
+ #define       CPU_PART_THUNDERX       0x0A1
+@@ -189,21 +168,40 @@ bool VM_Version::is_cpu_emulated() {
+ 
+ #define       CPU_REV_THUNDERX2_0     0x00
+ 
++/* APM (now Ampere) Part number */
++#define CPU_PART_EMAG8180     0x000
++
++/* Ampere Part numbers */
++#define       CPU_PART_AMPERE1        0xAC3
++#define       CPU_PART_AMPERE1A       0xAC4
++
++/* Microsoft Part numbers */
++#define       CPU_PART_AZURE_COBALT_100       0xD49
++
++/* Qualcomm */
++#define       CPU_PART_KRYO400_GOLD   0x804
++#define       CPU_PART_KRYO400_SILVER 0x805
++
++/* Apple part numbers */
++#define CPU_PART_M1_ICESTORM      0x022
++#define CPU_PART_M1_FIRESTORM     0x023
++#define CPU_PART_M1_ICESTORM_PRO  0x024
++#define CPU_PART_M1_FIRESTORM_PRO 0x025
++#define CPU_PART_M1_ICESTORM_MAX  0x028
++#define CPU_PART_M1_FIRESTORM_MAX 0x029
++#define CPU_PART_M2_BLIZZARD      0x032
++#define CPU_PART_M2_AVALANCHE     0x033
++#define CPU_PART_M2_BLIZZARD_PRO  0x034
++#define CPU_PART_M2_AVALANCHE_PRO 0x035
++#define CPU_PART_M2_BLIZZARD_MAX  0x038
++#define CPU_PART_M2_AVALANCHE_MAX 0x039
++
+ #define       CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
+ #define       CPU_PART(midr)  (((midr) >> 4) & 0xfff)
+ #define       CPU_VAR(midr)   (((midr) >> 20) & 0xf)
+ #define       CPU_REV(midr)   (((midr) >> 0) & 0xf)
+ #define UL(x)   UINT64_C(x)
+ 
+-struct cpu_desc {
+-      u_int           cpu_impl;
+-      u_int           cpu_part_num;
+-      u_int           cpu_variant;
+-      u_int           cpu_revision;
+-      const char      *cpu_impl_name;
+-      const char      *cpu_part_name;
+-};
+-
+ struct cpu_parts {
+       u_int           part_id;
+       const char      *part_name;
+@@ -226,16 +224,51 @@ struct cpu_implementers {
+  */
+ /* ARM Ltd. */
+ static const struct cpu_parts cpu_parts_arm[] = {
++      { CPU_PART_AEM_V8, "AEMv8" },
+       { CPU_PART_FOUNDATION, "Foundation-Model" },
++      { CPU_PART_CORTEX_A34, "Cortex-A34" },
+       { CPU_PART_CORTEX_A35, "Cortex-A35" },
+       { CPU_PART_CORTEX_A53, "Cortex-A53" },
+       { CPU_PART_CORTEX_A55, "Cortex-A55" },
+       { CPU_PART_CORTEX_A57, "Cortex-A57" },
++      { CPU_PART_CORTEX_A65, "Cortex-A65" },
++      { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" },
+       { CPU_PART_CORTEX_A72, "Cortex-A72" },
+       { CPU_PART_CORTEX_A73, "Cortex-A73" },
+       { CPU_PART_CORTEX_A75, "Cortex-A75" },
++      { CPU_PART_CORTEX_A76, "Cortex-A76" },
++      { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
++      { CPU_PART_CORTEX_A77, "Cortex-A77" },
++      { CPU_PART_CORTEX_A78, "Cortex-A78" },
++      { CPU_PART_CORTEX_A78AE, "Cortex-A78AE" },
++      { CPU_PART_CORTEX_A78C, "Cortex-A78C" },
++      { CPU_PART_CORTEX_A510, "Cortex-A510" },
++      { CPU_PART_CORTEX_A520, "Cortex-A520" },
++      { CPU_PART_CORTEX_A710, "Cortex-A710" },
++      { CPU_PART_CORTEX_A715, "Cortex-A715" },
++      { CPU_PART_CORTEX_A720, "Cortex-A720" },
++      { CPU_PART_CORTEX_A725, "Cortex-A725" },
++	{ CPU_PART_CORTEX_X925, "Cortex-X925" },
++      { CPU_PART_CORTEX_X1, "Cortex-X1" },
++      { CPU_PART_CORTEX_X1C, "Cortex-X1C" },
++      { CPU_PART_CORTEX_X2, "Cortex-X2" },
++      { CPU_PART_CORTEX_X3, "Cortex-X3" },
++      { CPU_PART_CORTEX_X4, "Cortex-X4" },
++      { CPU_PART_C1_NANO, "C1-Nano" },
++      { CPU_PART_C1_PRO, "C1-Pro" },
++      { CPU_PART_C1_PREMIUM, "C1-Premium" },
++      { CPU_PART_C1_ULTRA, "C1-Ultra" },
++      { CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
++      { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
++      { CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
++      { CPU_PART_NEOVERSE_N3, "Neoverse-N3" },
++      { CPU_PART_NEOVERSE_V1, "Neoverse-V1" },
++      { CPU_PART_NEOVERSE_V2, "Neoverse-V2" },
++      { CPU_PART_NEOVERSE_V3, "Neoverse-V3" },
++      { CPU_PART_NEOVERSE_V3AE, "Neoverse-V3AE" },
+       CPU_PART_NONE,
+ };
++
+ /* Cavium */
+ static const struct cpu_parts cpu_parts_cavium[] = {
+       { CPU_PART_THUNDERX, "ThunderX" },
+@@ -243,6 +276,49 @@ static const struct cpu_parts cpu_parts_cavium[] = {
+       CPU_PART_NONE,
+ };
+ 
++/* APM (now Ampere) */
++static const struct cpu_parts cpu_parts_apm[] = {
++      { CPU_PART_EMAG8180, "eMAG 8180" },
++      CPU_PART_NONE,
++};
++
++/* Ampere */
++static const struct cpu_parts cpu_parts_ampere[] = {
++      { CPU_PART_AMPERE1, "AmpereOne AC03" },
++      { CPU_PART_AMPERE1A, "AmpereOne AC04" },
++      CPU_PART_NONE,
++};
++
++/* Microsoft */
++static const struct cpu_parts cpu_parts_microsoft[] = {
++      { CPU_PART_AZURE_COBALT_100, "Azure Cobalt 100" },
++      CPU_PART_NONE,
++};
++
++/* Qualcomm */
++static const struct cpu_parts cpu_parts_qcom[] = {
++      { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" },
++      { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" },
++      CPU_PART_NONE,
++};
++
++/* Apple */
++static const struct cpu_parts cpu_parts_apple[] = {
++      { CPU_PART_M1_ICESTORM, "M1 Icestorm" },
++      { CPU_PART_M1_FIRESTORM, "M1 Firestorm" },
++      { CPU_PART_M1_ICESTORM_PRO, "M1 Pro Icestorm" },
++      { CPU_PART_M1_FIRESTORM_PRO, "M1 Pro Firestorm" },
++      { CPU_PART_M1_ICESTORM_MAX, "M1 Max Icestorm" },
++      { CPU_PART_M1_FIRESTORM_MAX, "M1 Max Firestorm" },
++      { CPU_PART_M2_BLIZZARD, "M2 Blizzard" },
++      { CPU_PART_M2_AVALANCHE, "M2 Avalanche" },
++      { CPU_PART_M2_BLIZZARD_PRO, "M2 Pro Blizzard" },
++      { CPU_PART_M2_AVALANCHE_PRO, "M2 Pro Avalanche" },
++      { CPU_PART_M2_BLIZZARD_MAX, "M2 Max Blizzard" },
++      { CPU_PART_M2_AVALANCHE_MAX, "M2 Max Avalanche" },
++      CPU_PART_NONE,
++};
++
+ /* Unknown */
+ static const struct cpu_parts cpu_parts_none[] = {
+       CPU_PART_NONE,
+@@ -252,74 +328,62 @@ static const struct cpu_parts cpu_parts_none[] = {
+  * Implementers table.
+  */
+ const struct cpu_implementers cpu_implementers[] = {
++      { CPU_IMPL_AMPERE,      "Ampere",       cpu_parts_ampere },
++      { CPU_IMPL_APPLE,       "Apple",        cpu_parts_apple },
++      { CPU_IMPL_APM,         "APM",          cpu_parts_apm },
+       { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
+       { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
+       { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
+       { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
+-      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
+-      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
+-      { CPU_IMPL_APM,         "APM",          cpu_parts_none },
+-      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
+-      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_FUJITSU,     "Fujitsu",      cpu_parts_none },
++      { CPU_IMPL_HISILICON,   "HiSilicon",    cpu_parts_none },
++      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
++      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_MICROSOFT,   "Microsoft",    cpu_parts_microsoft },
++      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
++      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_qcom },
+       CPU_IMPLEMENTER_NONE,
+ };
+ 
+-#ifdef __FreeBSD__
+-static unsigned long os_get_processor_features() {
+-  unsigned long auxv = 0;
+-  uint64_t id_aa64isar0, id_aa64pfr0;
+-
+-  id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+-  id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
+-
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_BASE) {
+-    auxv = auxv | HWCAP_AES;
+-  }
+-
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_PMULL) {
+-    auxv = auxv | HWCAP_PMULL;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA1_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE) {
+-    auxv = auxv | HWCAP_SHA1;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA2_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA2_BASE) {
+-    auxv = auxv | HWCAP_SHA2;
+-  }
+-
+-  if (ID_AA64ISAR0_CRC32_VAL(id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE) {
+-    auxv = auxv | HWCAP_CRC32;
+-  }
+-
+-  if (ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_IMPL || \
+-      ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_HP ) {
+-    auxv = auxv | HWCAP_ASIMD;
+-  }
+-
+-  return auxv;
+-}
+-#endif
+-
+ void VM_Version::get_os_cpu_info() {
+-#if defined(__OpenBSD__) || defined(__NetBSD__)
+-  // READ_SPECIALREG is not available from userland on OpenBSD.
++#if defined(__NetBSD__)
++  // READ_SPECIALREG is not available from userland on NetBSD.
+   // Hardcode these values to the "lowest common denominator"
+   _cpu = CPU_IMPL_ARM;
+   _model = CPU_PART_CORTEX_A53;
+   _variant = 0;
+   _revision = 0;
+   _features = HWCAP_ASIMD;
+-#elif defined(__FreeBSD__)
+-  struct cpu_desc cpu_desc[1];
+-  struct cpu_desc user_cpu_desc;
++#elif defined(__FreeBSD__) || defined(__OpenBSD__)
+ 
++  unsigned long auxv = 0;
++  elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv));
++
++  STATIC_ASSERT(CPU_FP      == HWCAP_FP);
++  STATIC_ASSERT(CPU_ASIMD   == HWCAP_ASIMD);
++  STATIC_ASSERT(CPU_EVTSTRM == HWCAP_EVTSTRM);
++  STATIC_ASSERT(CPU_AES     == HWCAP_AES);
++  STATIC_ASSERT(CPU_PMULL   == HWCAP_PMULL);
++  STATIC_ASSERT(CPU_SHA1    == HWCAP_SHA1);
++  STATIC_ASSERT(CPU_SHA2    == HWCAP_SHA2);
++  STATIC_ASSERT(CPU_CRC32   == HWCAP_CRC32);
++  STATIC_ASSERT(CPU_LSE     == HWCAP_ATOMICS);
++  _features = auxv & (
++      HWCAP_FP      |
++      HWCAP_ASIMD   |
++      HWCAP_EVTSTRM |
++      HWCAP_AES     |
++      HWCAP_PMULL   |
++      HWCAP_SHA1    |
++      HWCAP_SHA2    |
++      HWCAP_CRC32   |
++      HWCAP_ATOMICS);
++
+   uint32_t midr;
+   uint32_t impl_id;
+   uint32_t part_id;
+-  uint32_t cpu = 0;
+   size_t i;
+   const struct cpu_parts *cpu_partsp = NULL;
+ 
+@@ -329,8 +393,7 @@ void VM_Version::get_os_cpu_info() {
+   for (i = 0; i < nitems(cpu_implementers); i++) {
+     if (impl_id == cpu_implementers[i].impl_id ||
+       cpu_implementers[i].impl_id == 0) {
+-      cpu_desc[cpu].cpu_impl = impl_id;
+-      cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
++      _cpu = impl_id;
+       cpu_partsp = cpu_implementers[i].cpu_parts;
+       break;
+     }
+@@ -338,36 +401,13 @@ void VM_Version::get_os_cpu_info() {
+   part_id = CPU_PART(midr);
+   for (i = 0; &cpu_partsp[i] != NULL; i++) {
+     if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) {
+-      cpu_desc[cpu].cpu_part_num = part_id;
+-      cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
++      _model = part_id;
+       break;
+     }
+   }
+ 
+-  cpu_desc[cpu].cpu_revision = CPU_REV(midr);
+-  cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
+-
+-  _cpu = cpu_desc[cpu].cpu_impl;
+-  _variant = cpu_desc[cpu].cpu_variant;
+-  _model = cpu_desc[cpu].cpu_part_num;
+-  _revision = cpu_desc[cpu].cpu_revision;
+-
+-  uint64_t auxv = os_get_processor_features();
+-
+-  _features = auxv & (
+-      HWCAP_FP      |
+-      HWCAP_ASIMD   |
+-      HWCAP_EVTSTRM |
+-      HWCAP_AES     |
+-      HWCAP_PMULL   |
+-      HWCAP_SHA1    |
+-      HWCAP_SHA2    |
+-      HWCAP_CRC32   |
+-      HWCAP_ATOMICS |
+-      HWCAP_DCPOP   |
+-      HWCAP_SHA3    |
+-      HWCAP_SHA512  |
+-      HWCAP_SVE);
++  _variant = CPU_VAR(midr);
++  _revision = CPU_REV(midr);
+ #endif
+ 
+   uint64_t ctr_el0;
Index: 17/Makefile
===================================================================
RCS file: /cvs/ports/devel/jdk/17/Makefile,v
diff -u -p -u -r1.36 Makefile
--- 17/Makefile 3 Nov 2025 13:44:13 -0000       1.36
+++ 17/Makefile 13 Dec 2025 23:35:00 -0000
@@ -12,6 +12,7 @@ PACKAGE_VER=  ${BASE_VER}.${PATCH_VER}.${
 PKGNAME=       jdk-${PACKAGE_VER}
 PKGSTEM=       jdk-17
 EPOCH=         0
+REVISION=      0
 
 DIST_SUBDIR=   jdk
 DISTNAME=      jdk-${VERSION_STR}
Index: 17/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
===================================================================
RCS file: 17/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
diff -N 17/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 17/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp	13 Dec 2025 23:35:00 -0000
@@ -0,0 +1,461 @@
+Detect CPU, model, variant and revision.
+Get hardware capabilities using elf_aux_info(3).
+
+Index: src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+--- src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp.orig
++++ src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+@@ -104,91 +104,70 @@ bool VM_Version::is_cpu_emulated() {
+ #else // __APPLE__
+ 
+ #include <machine/armreg.h>
+-#if defined (__FreeBSD__)
+-#include <machine/elf.h>
++#if defined (__FreeBSD__) || defined (__OpenBSD__)
++#include <sys/auxv.h>
+ #endif
+ 
+-#ifndef HWCAP_ASIMD
+-#define HWCAP_ASIMD (1<<1)
+-#endif
+-
+-#ifndef HWCAP_AES
+-#define HWCAP_AES   (1<<3)
+-#endif
+-
+-#ifndef HWCAP_PMULL
+-#define HWCAP_PMULL (1<<4)
+-#endif
+-
+-#ifndef HWCAP_SHA1
+-#define HWCAP_SHA1  (1<<5)
+-#endif
+-
+-#ifndef HWCAP_SHA2
+-#define HWCAP_SHA2  (1<<6)
+-#endif
+-
+-#ifndef HWCAP_CRC32
+-#define HWCAP_CRC32 (1<<7)
+-#endif
+-
+-#ifndef HWCAP_ATOMICS
+-#define HWCAP_ATOMICS (1<<8)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_SHIFT
+-#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD
+-#define ID_AA64PFR0_AdvSIMD(x) ((x) & (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT))
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_IMPL
+-#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_HP
+-#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64ISAR0_AES_VAL
+-#define ID_AA64ISAR0_AES_VAL ID_AA64ISAR0_AES
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA1_VAL
+-#define ID_AA64ISAR0_SHA1_VAL ID_AA64ISAR0_SHA1
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA2_VAL
+-#define ID_AA64ISAR0_SHA2_VAL ID_AA64ISAR0_SHA2
+-#endif
+-
+-#ifndef ID_AA64ISAR0_CRC32_VAL
+-#define ID_AA64ISAR0_CRC32_VAL ID_AA64ISAR0_CRC32
+-#endif
+-
+ #define       CPU_IMPL_ARM            0x41
+ #define       CPU_IMPL_BROADCOM       0x42
+ #define       CPU_IMPL_CAVIUM         0x43
+ #define       CPU_IMPL_DEC            0x44
++#define       CPU_IMPL_FUJITSU        0x46
++#define       CPU_IMPL_HISILICON      0x48
+ #define       CPU_IMPL_INFINEON       0x49
+ #define       CPU_IMPL_FREESCALE      0x4D
+ #define       CPU_IMPL_NVIDIA         0x4E
+ #define       CPU_IMPL_APM            0x50
+ #define       CPU_IMPL_QUALCOMM       0x51
+ #define       CPU_IMPL_MARVELL        0x56
++#define       CPU_IMPL_APPLE          0x61
+ #define       CPU_IMPL_INTEL          0x69
++#define       CPU_IMPL_AMPERE         0xC0
++#define       CPU_IMPL_MICROSOFT      0x6D
+ 
+ /* ARM Part numbers */
+ #define       CPU_PART_FOUNDATION     0xD00
+-#define       CPU_PART_CORTEX_A35     0xD04
++#define       CPU_PART_CORTEX_A34     0xD02
+ #define       CPU_PART_CORTEX_A53     0xD03
++#define       CPU_PART_CORTEX_A35     0xD04
+ #define       CPU_PART_CORTEX_A55     0xD05
++#define       CPU_PART_CORTEX_A65     0xD06
+ #define       CPU_PART_CORTEX_A57     0xD07
+ #define       CPU_PART_CORTEX_A72     0xD08
+ #define       CPU_PART_CORTEX_A73     0xD09
+ #define       CPU_PART_CORTEX_A75     0xD0A
++#define       CPU_PART_CORTEX_A76     0xD0B
++#define       CPU_PART_NEOVERSE_N1    0xD0C
++#define       CPU_PART_CORTEX_A77     0xD0D
++#define       CPU_PART_CORTEX_A76AE   0xD0E
++#define       CPU_PART_AEM_V8         0xD0F
++#define       CPU_PART_NEOVERSE_V1    0xD40
++#define       CPU_PART_CORTEX_A78     0xD41
++#define       CPU_PART_CORTEX_A78AE   0xD42
++#define       CPU_PART_CORTEX_A65AE   0xD43
++#define       CPU_PART_CORTEX_X1      0xD44
++#define       CPU_PART_CORTEX_A510    0xD46
++#define       CPU_PART_CORTEX_A710    0xD47
++#define       CPU_PART_CORTEX_X2      0xD48
++#define       CPU_PART_NEOVERSE_N2    0xD49
++#define       CPU_PART_NEOVERSE_E1    0xD4A
++#define       CPU_PART_CORTEX_A78C    0xD4B
++#define       CPU_PART_CORTEX_X1C     0xD4C
++#define       CPU_PART_CORTEX_A715    0xD4D
++#define       CPU_PART_CORTEX_X3      0xD4E
++#define       CPU_PART_NEOVERSE_V2    0xD4F
++#define       CPU_PART_CORTEX_A520    0xD80
++#define       CPU_PART_CORTEX_A720    0xD81
++#define       CPU_PART_CORTEX_X4      0xD82
++#define       CPU_PART_NEOVERSE_V3AE  0xD83
++#define       CPU_PART_NEOVERSE_V3    0xD84
++#define       CPU_PART_CORTEX_X925    0xD85
++#define       CPU_PART_CORTEX_A725    0xD87
++#define       CPU_PART_C1_NANO        0xD8A
++#define       CPU_PART_C1_PRO         0xD8B
++#define       CPU_PART_C1_ULTRA       0xD8C
++#define       CPU_PART_NEOVERSE_N3    0xD8E
++#define       CPU_PART_C1_PREMIUM     0xD90
+ 
+ /* Cavium Part numbers */
+ #define       CPU_PART_THUNDERX       0x0A1
+@@ -201,21 +180,40 @@ bool VM_Version::is_cpu_emulated() {
+ 
+ #define       CPU_REV_THUNDERX2_0     0x00
+ 
++/* APM (now Ampere) Part number */
++#define CPU_PART_EMAG8180     0x000
++
++/* Ampere Part numbers */
++#define       CPU_PART_AMPERE1        0xAC3
++#define       CPU_PART_AMPERE1A       0xAC4
++
++/* Microsoft Part numbers */
++#define       CPU_PART_AZURE_COBALT_100       0xD49
++
++/* Qualcomm */
++#define       CPU_PART_KRYO400_GOLD   0x804
++#define       CPU_PART_KRYO400_SILVER 0x805
++
++/* Apple part numbers */
++#define CPU_PART_M1_ICESTORM      0x022
++#define CPU_PART_M1_FIRESTORM     0x023
++#define CPU_PART_M1_ICESTORM_PRO  0x024
++#define CPU_PART_M1_FIRESTORM_PRO 0x025
++#define CPU_PART_M1_ICESTORM_MAX  0x028
++#define CPU_PART_M1_FIRESTORM_MAX 0x029
++#define CPU_PART_M2_BLIZZARD      0x032
++#define CPU_PART_M2_AVALANCHE     0x033
++#define CPU_PART_M2_BLIZZARD_PRO  0x034
++#define CPU_PART_M2_AVALANCHE_PRO 0x035
++#define CPU_PART_M2_BLIZZARD_MAX  0x038
++#define CPU_PART_M2_AVALANCHE_MAX 0x039
++
+ #define       CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
+ #define       CPU_PART(midr)  (((midr) >> 4) & 0xfff)
+ #define       CPU_VAR(midr)   (((midr) >> 20) & 0xf)
+ #define       CPU_REV(midr)   (((midr) >> 0) & 0xf)
+ #define UL(x)   UINT64_C(x)
+ 
+-struct cpu_desc {
+-      u_int           cpu_impl;
+-      u_int           cpu_part_num;
+-      u_int           cpu_variant;
+-      u_int           cpu_revision;
+-      const char      *cpu_impl_name;
+-      const char      *cpu_part_name;
+-};
+-
+ struct cpu_parts {
+       u_int           part_id;
+       const char      *part_name;
+@@ -238,16 +236,51 @@ struct cpu_implementers {
+  */
+ /* ARM Ltd. */
+ static const struct cpu_parts cpu_parts_arm[] = {
++      { CPU_PART_AEM_V8, "AEMv8" },
+       { CPU_PART_FOUNDATION, "Foundation-Model" },
++      { CPU_PART_CORTEX_A34, "Cortex-A34" },
+       { CPU_PART_CORTEX_A35, "Cortex-A35" },
+       { CPU_PART_CORTEX_A53, "Cortex-A53" },
+       { CPU_PART_CORTEX_A55, "Cortex-A55" },
+       { CPU_PART_CORTEX_A57, "Cortex-A57" },
++      { CPU_PART_CORTEX_A65, "Cortex-A65" },
++      { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" },
+       { CPU_PART_CORTEX_A72, "Cortex-A72" },
+       { CPU_PART_CORTEX_A73, "Cortex-A73" },
+       { CPU_PART_CORTEX_A75, "Cortex-A75" },
++      { CPU_PART_CORTEX_A76, "Cortex-A76" },
++      { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
++      { CPU_PART_CORTEX_A77, "Cortex-A77" },
++      { CPU_PART_CORTEX_A78, "Cortex-A78" },
++      { CPU_PART_CORTEX_A78AE, "Cortex-A78AE" },
++      { CPU_PART_CORTEX_A78C, "Cortex-A78C" },
++      { CPU_PART_CORTEX_A510, "Cortex-A510" },
++      { CPU_PART_CORTEX_A520, "Cortex-A520" },
++      { CPU_PART_CORTEX_A710, "Cortex-A710" },
++      { CPU_PART_CORTEX_A715, "Cortex-A715" },
++      { CPU_PART_CORTEX_A720, "Cortex-A720" },
++      { CPU_PART_CORTEX_A725, "Cortex-A725" },
++	{ CPU_PART_CORTEX_X925, "Cortex-X925" },
++      { CPU_PART_CORTEX_X1, "Cortex-X1" },
++      { CPU_PART_CORTEX_X1C, "Cortex-X1C" },
++      { CPU_PART_CORTEX_X2, "Cortex-X2" },
++      { CPU_PART_CORTEX_X3, "Cortex-X3" },
++      { CPU_PART_CORTEX_X4, "Cortex-X4" },
++      { CPU_PART_C1_NANO, "C1-Nano" },
++      { CPU_PART_C1_PRO, "C1-Pro" },
++      { CPU_PART_C1_PREMIUM, "C1-Premium" },
++      { CPU_PART_C1_ULTRA, "C1-Ultra" },
++      { CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
++      { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
++      { CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
++      { CPU_PART_NEOVERSE_N3, "Neoverse-N3" },
++      { CPU_PART_NEOVERSE_V1, "Neoverse-V1" },
++      { CPU_PART_NEOVERSE_V2, "Neoverse-V2" },
++      { CPU_PART_NEOVERSE_V3, "Neoverse-V3" },
++      { CPU_PART_NEOVERSE_V3AE, "Neoverse-V3AE" },
+       CPU_PART_NONE,
+ };
++
+ /* Cavium */
+ static const struct cpu_parts cpu_parts_cavium[] = {
+       { CPU_PART_THUNDERX, "ThunderX" },
+@@ -255,6 +288,49 @@ static const struct cpu_parts cpu_parts_cavium[] = {
+       CPU_PART_NONE,
+ };
+ 
++/* APM (now Ampere) */
++static const struct cpu_parts cpu_parts_apm[] = {
++      { CPU_PART_EMAG8180, "eMAG 8180" },
++      CPU_PART_NONE,
++};
++
++/* Ampere */
++static const struct cpu_parts cpu_parts_ampere[] = {
++      { CPU_PART_AMPERE1, "AmpereOne AC03" },
++      { CPU_PART_AMPERE1A, "AmpereOne AC04" },
++      CPU_PART_NONE,
++};
++
++/* Microsoft */
++static const struct cpu_parts cpu_parts_microsoft[] = {
++      { CPU_PART_AZURE_COBALT_100, "Azure Cobalt 100" },
++      CPU_PART_NONE,
++};
++
++/* Qualcomm */
++static const struct cpu_parts cpu_parts_qcom[] = {
++      { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" },
++      { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" },
++      CPU_PART_NONE,
++};
++
++/* Apple */
++static const struct cpu_parts cpu_parts_apple[] = {
++      { CPU_PART_M1_ICESTORM, "M1 Icestorm" },
++      { CPU_PART_M1_FIRESTORM, "M1 Firestorm" },
++      { CPU_PART_M1_ICESTORM_PRO, "M1 Pro Icestorm" },
++      { CPU_PART_M1_FIRESTORM_PRO, "M1 Pro Firestorm" },
++      { CPU_PART_M1_ICESTORM_MAX, "M1 Max Icestorm" },
++      { CPU_PART_M1_FIRESTORM_MAX, "M1 Max Firestorm" },
++      { CPU_PART_M2_BLIZZARD, "M2 Blizzard" },
++      { CPU_PART_M2_AVALANCHE, "M2 Avalanche" },
++      { CPU_PART_M2_BLIZZARD_PRO, "M2 Pro Blizzard" },
++      { CPU_PART_M2_AVALANCHE_PRO, "M2 Pro Avalanche" },
++      { CPU_PART_M2_BLIZZARD_MAX, "M2 Max Blizzard" },
++      { CPU_PART_M2_AVALANCHE_MAX, "M2 Max Avalanche" },
++      CPU_PART_NONE,
++};
++
+ /* Unknown */
+ static const struct cpu_parts cpu_parts_none[] = {
+       CPU_PART_NONE,
+@@ -264,74 +340,66 @@ static const struct cpu_parts cpu_parts_none[] = {
+  * Implementers table.
+  */
+ const struct cpu_implementers cpu_implementers[] = {
++      { CPU_IMPL_AMPERE,      "Ampere",       cpu_parts_ampere },
++      { CPU_IMPL_APPLE,       "Apple",        cpu_parts_apple },
++      { CPU_IMPL_APM,         "APM",          cpu_parts_apm },
+       { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
+       { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
+       { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
+       { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
+-      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
+-      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
+-      { CPU_IMPL_APM,         "APM",          cpu_parts_none },
+-      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
+-      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_FUJITSU,     "Fujitsu",      cpu_parts_none },
++      { CPU_IMPL_HISILICON,   "HiSilicon",    cpu_parts_none },
++      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
++      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_MICROSOFT,   "Microsoft",    cpu_parts_microsoft },
++      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
++      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_qcom },
+       CPU_IMPLEMENTER_NONE,
+ };
+ 
+-#ifdef __FreeBSD__
+-static unsigned long os_get_processor_features() {
++void VM_Version::get_os_cpu_info() {
++#if defined(__FreeBSD__) || defined(__OpenBSD__)
++
+   unsigned long auxv = 0;
+-  uint64_t id_aa64isar0, id_aa64pfr0;
++  unsigned long auxv2 = 0;
++  elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv));
++  elf_aux_info(AT_HWCAP2, &auxv2, sizeof(auxv2));
+ 
+-  id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+-  id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
++  static_assert(CPU_FP      == HWCAP_FP,      "Flag CPU_FP must follow HWCAP");
++  static_assert(CPU_ASIMD   == HWCAP_ASIMD,   "Flag CPU_ASIMD must follow HWCAP");
++  static_assert(CPU_EVTSTRM == HWCAP_EVTSTRM, "Flag CPU_EVTSTRM must follow HWCAP");
++  static_assert(CPU_AES     == HWCAP_AES,     "Flag CPU_AES must follow HWCAP");
++  static_assert(CPU_PMULL   == HWCAP_PMULL,   "Flag CPU_PMULL must follow HWCAP");
++  static_assert(CPU_SHA1    == HWCAP_SHA1,    "Flag CPU_SHA1 must follow HWCAP");
++  static_assert(CPU_SHA2    == HWCAP_SHA2,    "Flag CPU_SHA2 must follow HWCAP");
++  static_assert(CPU_CRC32   == HWCAP_CRC32,   "Flag CPU_CRC32 must follow HWCAP");
++  static_assert(CPU_LSE     == HWCAP_ATOMICS, "Flag CPU_LSE must follow HWCAP");
++  static_assert(CPU_DCPOP   == HWCAP_DCPOP,   "Flag CPU_DCPOP must follow HWCAP");
++  static_assert(CPU_SHA3    == HWCAP_SHA3,    "Flag CPU_SHA3 must follow HWCAP");
++  static_assert(CPU_SHA512  == HWCAP_SHA512,  "Flag CPU_SHA512 must follow HWCAP");
++  static_assert(CPU_SVE     == HWCAP_SVE,     "Flag CPU_SVE must follow HWCAP");
++  _features = auxv & (
++      HWCAP_FP      |
++      HWCAP_ASIMD   |
++      HWCAP_EVTSTRM |
++      HWCAP_AES     |
++      HWCAP_PMULL   |
++      HWCAP_SHA1    |
++      HWCAP_SHA2    |
++      HWCAP_CRC32   |
++      HWCAP_ATOMICS |
++      HWCAP_DCPOP   |
++      HWCAP_SHA3    |
++      HWCAP_SHA512  |
++      HWCAP_SVE);
+ 
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_BASE) {
+-    auxv = auxv | HWCAP_AES;
+-  }
++  if (auxv2 & HWCAP2_SVE2) _features |= CPU_SVE2;
+ 
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_PMULL) {
+-    auxv = auxv | HWCAP_PMULL;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA1_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE) {
+-    auxv = auxv | HWCAP_SHA1;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA2_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA2_BASE) {
+-    auxv = auxv | HWCAP_SHA2;
+-  }
+-
+-  if (ID_AA64ISAR0_CRC32_VAL(id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE) {
+-    auxv = auxv | HWCAP_CRC32;
+-  }
+-
+-  if (ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_IMPL || \
+-      ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_HP ) {
+-    auxv = auxv | HWCAP_ASIMD;
+-  }
+-
+-  return auxv;
+-}
+-#endif
+-
+-void VM_Version::get_os_cpu_info() {
+-#ifdef __OpenBSD__
+-  // READ_SPECIALREG is not available from userland on OpenBSD.
+-  // Hardcode these values to the "lowest common denominator"
+-  _cpu = CPU_IMPL_ARM;
+-  _model = CPU_PART_CORTEX_A53;
+-  _variant = 0;
+-  _revision = 0;
+-  _features = HWCAP_ASIMD;
+-#elif defined(__FreeBSD__)
+-  struct cpu_desc cpu_desc[1];
+-  struct cpu_desc user_cpu_desc;
+-
+   uint32_t midr;
+   uint32_t impl_id;
+   uint32_t part_id;
+-  uint32_t cpu = 0;
+   size_t i;
+   const struct cpu_parts *cpu_partsp = NULL;
+ 
+@@ -341,8 +409,7 @@ void VM_Version::get_os_cpu_info() {
+   for (i = 0; i < nitems(cpu_implementers); i++) {
+     if (impl_id == cpu_implementers[i].impl_id ||
+       cpu_implementers[i].impl_id == 0) {
+-      cpu_desc[cpu].cpu_impl = impl_id;
+-      cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
++      _cpu = impl_id;
+       cpu_partsp = cpu_implementers[i].cpu_parts;
+       break;
+     }
+@@ -350,36 +417,13 @@ void VM_Version::get_os_cpu_info() {
+   part_id = CPU_PART(midr);
+   for (i = 0; &cpu_partsp[i] != NULL; i++) {
+     if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) {
+-      cpu_desc[cpu].cpu_part_num = part_id;
+-      cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
++      _model = part_id;
+       break;
+     }
+   }
+ 
+-  cpu_desc[cpu].cpu_revision = CPU_REV(midr);
+-  cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
+-
+-  _cpu = cpu_desc[cpu].cpu_impl;
+-  _variant = cpu_desc[cpu].cpu_variant;
+-  _model = cpu_desc[cpu].cpu_part_num;
+-  _revision = cpu_desc[cpu].cpu_revision;
+-
+-  uint64_t auxv = os_get_processor_features();
+-
+-  _features = auxv & (
+-      HWCAP_FP      |
+-      HWCAP_ASIMD   |
+-      HWCAP_EVTSTRM |
+-      HWCAP_AES     |
+-      HWCAP_PMULL   |
+-      HWCAP_SHA1    |
+-      HWCAP_SHA2    |
+-      HWCAP_CRC32   |
+-      HWCAP_ATOMICS |
+-      HWCAP_DCPOP   |
+-      HWCAP_SHA3    |
+-      HWCAP_SHA512  |
+-      HWCAP_SVE);
++  _variant = CPU_VAR(midr);
++  _revision = CPU_REV(midr);
+ #endif
+ 
+   uint64_t ctr_el0;
Index: 21/Makefile
===================================================================
RCS file: /cvs/ports/devel/jdk/21/Makefile,v
diff -u -p -u -r1.10 Makefile
--- 21/Makefile 3 Nov 2025 15:43:43 -0000       1.10
+++ 21/Makefile 13 Dec 2025 23:35:00 -0000
@@ -12,6 +12,7 @@ PACKAGE_VER=  ${BASE_VER}.${PATCH_VER}.${
 PKGNAME=       jdk-${PACKAGE_VER}
 PKGSTEM=       jdk-21
 EPOCH=         0
+REVISION=      0
 
 DIST_SUBDIR=   jdk
 DISTNAME=      jdk-${VERSION_STR}
Index: 21/patches/patch-make_common_NativeCompilation_gmk
===================================================================
RCS file: /cvs/ports/devel/jdk/21/patches/patch-make_common_NativeCompilation_gmk,v
diff -u -p -u -r1.1.1.1 patch-make_common_NativeCompilation_gmk
--- 21/patches/patch-make_common_NativeCompilation_gmk	11 Dec 2023 14:36:21 -0000	1.1.1.1
+++ 21/patches/patch-make_common_NativeCompilation_gmk	13 Dec 2025 23:35:00 -0000
@@ -5,7 +5,7 @@ get the debug package without bloating t
 Index: make/common/NativeCompilation.gmk
 --- make/common/NativeCompilation.gmk.orig
 +++ make/common/NativeCompilation.gmk
-@@ -1066,9 +1066,8 @@ define SetupNativeCompilationBody
+@@ -1080,9 +1080,8 @@ define SetupNativeCompilationBody
            # so we can run it after strip is called, since strip can sometimes mangle the
            # embedded debuglink, which we want to avoid.
            $1_CREATE_DEBUGINFO_CMDS := \
Index: 21/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
===================================================================
RCS file: 21/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
diff -N 21/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 21/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp	13 Dec 2025 23:35:01 -0000
@@ -0,0 +1,464 @@
+Detect CPU, model, variant and revision.
+Get hardware capabilities using elf_aux_info(3).
+
+Index: src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+--- src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp.orig
++++ src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+@@ -135,91 +135,70 @@ bool VM_Version::is_cpu_emulated() {
+ #else // __APPLE__
+ 
+ #include <machine/armreg.h>
+-#if defined (__FreeBSD__)
+-#include <machine/elf.h>
++#if defined (__FreeBSD__) || defined (__OpenBSD__)
++#include <sys/auxv.h>
+ #endif
+ 
+-#ifndef HWCAP_ASIMD
+-#define HWCAP_ASIMD (1<<1)
+-#endif
+-
+-#ifndef HWCAP_AES
+-#define HWCAP_AES   (1<<3)
+-#endif
+-
+-#ifndef HWCAP_PMULL
+-#define HWCAP_PMULL (1<<4)
+-#endif
+-
+-#ifndef HWCAP_SHA1
+-#define HWCAP_SHA1  (1<<5)
+-#endif
+-
+-#ifndef HWCAP_SHA2
+-#define HWCAP_SHA2  (1<<6)
+-#endif
+-
+-#ifndef HWCAP_CRC32
+-#define HWCAP_CRC32 (1<<7)
+-#endif
+-
+-#ifndef HWCAP_ATOMICS
+-#define HWCAP_ATOMICS (1<<8)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_SHIFT
+-#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD
+-#define ID_AA64PFR0_AdvSIMD(x) ((x) & (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT))
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_IMPL
+-#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_HP
+-#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64ISAR0_AES_VAL
+-#define ID_AA64ISAR0_AES_VAL ID_AA64ISAR0_AES
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA1_VAL
+-#define ID_AA64ISAR0_SHA1_VAL ID_AA64ISAR0_SHA1
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA2_VAL
+-#define ID_AA64ISAR0_SHA2_VAL ID_AA64ISAR0_SHA2
+-#endif
+-
+-#ifndef ID_AA64ISAR0_CRC32_VAL
+-#define ID_AA64ISAR0_CRC32_VAL ID_AA64ISAR0_CRC32
+-#endif
+-
+ #define       CPU_IMPL_ARM            0x41
+ #define       CPU_IMPL_BROADCOM       0x42
+ #define       CPU_IMPL_CAVIUM         0x43
+ #define       CPU_IMPL_DEC            0x44
++#define       CPU_IMPL_FUJITSU        0x46
++#define       CPU_IMPL_HISILICON      0x48
+ #define       CPU_IMPL_INFINEON       0x49
+ #define       CPU_IMPL_FREESCALE      0x4D
+ #define       CPU_IMPL_NVIDIA         0x4E
+ #define       CPU_IMPL_APM            0x50
+ #define       CPU_IMPL_QUALCOMM       0x51
+ #define       CPU_IMPL_MARVELL        0x56
++#define       CPU_IMPL_APPLE          0x61
+ #define       CPU_IMPL_INTEL          0x69
++#define       CPU_IMPL_AMPERE         0xC0
++#define       CPU_IMPL_MICROSOFT      0x6D
+ 
+ /* ARM Part numbers */
+ #define       CPU_PART_FOUNDATION     0xD00
+-#define       CPU_PART_CORTEX_A35     0xD04
++#define       CPU_PART_CORTEX_A34     0xD02
+ #define       CPU_PART_CORTEX_A53     0xD03
++#define       CPU_PART_CORTEX_A35     0xD04
+ #define       CPU_PART_CORTEX_A55     0xD05
++#define       CPU_PART_CORTEX_A65     0xD06
+ #define       CPU_PART_CORTEX_A57     0xD07
+ #define       CPU_PART_CORTEX_A72     0xD08
+ #define       CPU_PART_CORTEX_A73     0xD09
+ #define       CPU_PART_CORTEX_A75     0xD0A
++#define       CPU_PART_CORTEX_A76     0xD0B
++#define       CPU_PART_NEOVERSE_N1    0xD0C
++#define       CPU_PART_CORTEX_A77     0xD0D
++#define       CPU_PART_CORTEX_A76AE   0xD0E
++#define       CPU_PART_AEM_V8         0xD0F
++#define       CPU_PART_NEOVERSE_V1    0xD40
++#define       CPU_PART_CORTEX_A78     0xD41
++#define       CPU_PART_CORTEX_A78AE   0xD42
++#define       CPU_PART_CORTEX_A65AE   0xD43
++#define       CPU_PART_CORTEX_X1      0xD44
++#define       CPU_PART_CORTEX_A510    0xD46
++#define       CPU_PART_CORTEX_A710    0xD47
++#define       CPU_PART_CORTEX_X2      0xD48
++#define       CPU_PART_NEOVERSE_N2    0xD49
++#define       CPU_PART_NEOVERSE_E1    0xD4A
++#define       CPU_PART_CORTEX_A78C    0xD4B
++#define       CPU_PART_CORTEX_X1C     0xD4C
++#define       CPU_PART_CORTEX_A715    0xD4D
++#define       CPU_PART_CORTEX_X3      0xD4E
++#define       CPU_PART_NEOVERSE_V2    0xD4F
++#define       CPU_PART_CORTEX_A520    0xD80
++#define       CPU_PART_CORTEX_A720    0xD81
++#define       CPU_PART_CORTEX_X4      0xD82
++#define       CPU_PART_NEOVERSE_V3AE  0xD83
++#define       CPU_PART_NEOVERSE_V3    0xD84
++#define       CPU_PART_CORTEX_X925    0xD85
++#define       CPU_PART_CORTEX_A725    0xD87
++#define       CPU_PART_C1_NANO        0xD8A
++#define       CPU_PART_C1_PRO         0xD8B
++#define       CPU_PART_C1_ULTRA       0xD8C
++#define       CPU_PART_NEOVERSE_N3    0xD8E
++#define       CPU_PART_C1_PREMIUM     0xD90
+ 
+ /* Cavium Part numbers */
+ #define       CPU_PART_THUNDERX       0x0A1
+@@ -232,21 +211,40 @@ bool VM_Version::is_cpu_emulated() {
+ 
+ #define       CPU_REV_THUNDERX2_0     0x00
+ 
++/* APM (now Ampere) Part number */
++#define CPU_PART_EMAG8180     0x000
++
++/* Ampere Part numbers */
++#define       CPU_PART_AMPERE1        0xAC3
++#define       CPU_PART_AMPERE1A       0xAC4
++
++/* Microsoft Part numbers */
++#define       CPU_PART_AZURE_COBALT_100       0xD49
++
++/* Qualcomm */
++#define       CPU_PART_KRYO400_GOLD   0x804
++#define       CPU_PART_KRYO400_SILVER 0x805
++
++/* Apple part numbers */
++#define CPU_PART_M1_ICESTORM      0x022
++#define CPU_PART_M1_FIRESTORM     0x023
++#define CPU_PART_M1_ICESTORM_PRO  0x024
++#define CPU_PART_M1_FIRESTORM_PRO 0x025
++#define CPU_PART_M1_ICESTORM_MAX  0x028
++#define CPU_PART_M1_FIRESTORM_MAX 0x029
++#define CPU_PART_M2_BLIZZARD      0x032
++#define CPU_PART_M2_AVALANCHE     0x033
++#define CPU_PART_M2_BLIZZARD_PRO  0x034
++#define CPU_PART_M2_AVALANCHE_PRO 0x035
++#define CPU_PART_M2_BLIZZARD_MAX  0x038
++#define CPU_PART_M2_AVALANCHE_MAX 0x039
++
+ #define       CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
+ #define       CPU_PART(midr)  (((midr) >> 4) & 0xfff)
+ #define       CPU_VAR(midr)   (((midr) >> 20) & 0xf)
+ #define       CPU_REV(midr)   (((midr) >> 0) & 0xf)
+ #define UL(x)   UINT64_C(x)
+ 
+-struct cpu_desc {
+-      u_int           cpu_impl;
+-      u_int           cpu_part_num;
+-      u_int           cpu_variant;
+-      u_int           cpu_revision;
+-      const char      *cpu_impl_name;
+-      const char      *cpu_part_name;
+-};
+-
+ struct cpu_parts {
+       u_int           part_id;
+       const char      *part_name;
+@@ -269,16 +267,51 @@ struct cpu_implementers {
+  */
+ /* ARM Ltd. */
+ static const struct cpu_parts cpu_parts_arm[] = {
++      { CPU_PART_AEM_V8, "AEMv8" },
+       { CPU_PART_FOUNDATION, "Foundation-Model" },
++      { CPU_PART_CORTEX_A34, "Cortex-A34" },
+       { CPU_PART_CORTEX_A35, "Cortex-A35" },
+       { CPU_PART_CORTEX_A53, "Cortex-A53" },
+       { CPU_PART_CORTEX_A55, "Cortex-A55" },
+       { CPU_PART_CORTEX_A57, "Cortex-A57" },
++      { CPU_PART_CORTEX_A65, "Cortex-A65" },
++      { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" },
+       { CPU_PART_CORTEX_A72, "Cortex-A72" },
+       { CPU_PART_CORTEX_A73, "Cortex-A73" },
+       { CPU_PART_CORTEX_A75, "Cortex-A75" },
++      { CPU_PART_CORTEX_A76, "Cortex-A76" },
++      { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
++      { CPU_PART_CORTEX_A77, "Cortex-A77" },
++      { CPU_PART_CORTEX_A78, "Cortex-A78" },
++      { CPU_PART_CORTEX_A78AE, "Cortex-A78AE" },
++      { CPU_PART_CORTEX_A78C, "Cortex-A78C" },
++      { CPU_PART_CORTEX_A510, "Cortex-A510" },
++      { CPU_PART_CORTEX_A520, "Cortex-A520" },
++      { CPU_PART_CORTEX_A710, "Cortex-A710" },
++      { CPU_PART_CORTEX_A715, "Cortex-A715" },
++      { CPU_PART_CORTEX_A720, "Cortex-A720" },
++      { CPU_PART_CORTEX_A725, "Cortex-A725" },
++      { CPU_PART_CORTEX_X925, "Cortex-X925" },
++      { CPU_PART_CORTEX_X1, "Cortex-X1" },
++      { CPU_PART_CORTEX_X1C, "Cortex-X1C" },
++      { CPU_PART_CORTEX_X2, "Cortex-X2" },
++      { CPU_PART_CORTEX_X3, "Cortex-X3" },
++      { CPU_PART_CORTEX_X4, "Cortex-X4" },
++      { CPU_PART_C1_NANO, "C1-Nano" },
++      { CPU_PART_C1_PRO, "C1-Pro" },
++      { CPU_PART_C1_PREMIUM, "C1-Premium" },
++      { CPU_PART_C1_ULTRA, "C1-Ultra" },
++      { CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
++      { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
++      { CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
++      { CPU_PART_NEOVERSE_N3, "Neoverse-N3" },
++      { CPU_PART_NEOVERSE_V1, "Neoverse-V1" },
++      { CPU_PART_NEOVERSE_V2, "Neoverse-V2" },
++      { CPU_PART_NEOVERSE_V3, "Neoverse-V3" },
++      { CPU_PART_NEOVERSE_V3AE, "Neoverse-V3AE" },
+       CPU_PART_NONE,
+ };
++
+ /* Cavium */
+ static const struct cpu_parts cpu_parts_cavium[] = {
+       { CPU_PART_THUNDERX, "ThunderX" },
+@@ -286,6 +319,49 @@ static const struct cpu_parts cpu_parts_cavium[] = {
+       CPU_PART_NONE,
+ };
+ 
++/* APM (now Ampere) */
++static const struct cpu_parts cpu_parts_apm[] = {
++      { CPU_PART_EMAG8180, "eMAG 8180" },
++      CPU_PART_NONE,
++};
++
++/* Ampere */
++static const struct cpu_parts cpu_parts_ampere[] = {
++      { CPU_PART_AMPERE1, "AmpereOne AC03" },
++      { CPU_PART_AMPERE1A, "AmpereOne AC04" },
++      CPU_PART_NONE,
++};
++
++/* Microsoft */
++static const struct cpu_parts cpu_parts_microsoft[] = {
++      { CPU_PART_AZURE_COBALT_100, "Azure Cobalt 100" },
++      CPU_PART_NONE,
++};
++
++/* Qualcomm */
++static const struct cpu_parts cpu_parts_qcom[] = {
++      { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" },
++      { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" },
++      CPU_PART_NONE,
++};
++
++/* Apple */
++static const struct cpu_parts cpu_parts_apple[] = {
++      { CPU_PART_M1_ICESTORM, "M1 Icestorm" },
++      { CPU_PART_M1_FIRESTORM, "M1 Firestorm" },
++      { CPU_PART_M1_ICESTORM_PRO, "M1 Pro Icestorm" },
++      { CPU_PART_M1_FIRESTORM_PRO, "M1 Pro Firestorm" },
++      { CPU_PART_M1_ICESTORM_MAX, "M1 Max Icestorm" },
++      { CPU_PART_M1_FIRESTORM_MAX, "M1 Max Firestorm" },
++      { CPU_PART_M2_BLIZZARD, "M2 Blizzard" },
++      { CPU_PART_M2_AVALANCHE, "M2 Avalanche" },
++      { CPU_PART_M2_BLIZZARD_PRO, "M2 Pro Blizzard" },
++      { CPU_PART_M2_AVALANCHE_PRO, "M2 Pro Avalanche" },
++      { CPU_PART_M2_BLIZZARD_MAX, "M2 Max Blizzard" },
++      { CPU_PART_M2_AVALANCHE_MAX, "M2 Max Avalanche" },
++      CPU_PART_NONE,
++};
++
+ /* Unknown */
+ static const struct cpu_parts cpu_parts_none[] = {
+       CPU_PART_NONE,
+@@ -295,74 +371,69 @@ static const struct cpu_parts cpu_parts_none[] = {
+  * Implementers table.
+  */
+ const struct cpu_implementers cpu_implementers[] = {
++      { CPU_IMPL_AMPERE,      "Ampere",       cpu_parts_ampere },
++      { CPU_IMPL_APPLE,       "Apple",        cpu_parts_apple },
++      { CPU_IMPL_APM,         "APM",          cpu_parts_apm },
+       { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
+       { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
+       { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
+       { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
+-      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
+-      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
+-      { CPU_IMPL_APM,         "APM",          cpu_parts_none },
+-      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
+-      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_FUJITSU,     "Fujitsu",      cpu_parts_none },
++      { CPU_IMPL_HISILICON,   "HiSilicon",    cpu_parts_none },
++      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
++      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_MICROSOFT,   "Microsoft",    cpu_parts_microsoft },
++      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
++      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_qcom },
+       CPU_IMPLEMENTER_NONE,
+ };
+ 
+-#ifdef __FreeBSD__
+-static unsigned long os_get_processor_features() {
++void VM_Version::get_os_cpu_info() {
++#if defined(__FreeBSD__) || defined(__OpenBSD__)
++
+   unsigned long auxv = 0;
+-  uint64_t id_aa64isar0, id_aa64pfr0;
++  unsigned long auxv2 = 0;
++  elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv));
++  elf_aux_info(AT_HWCAP2, &auxv2, sizeof(auxv2));
+ 
+-  id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+-  id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
++  static_assert(CPU_FP      == HWCAP_FP,      "Flag CPU_FP must follow Linux HWCAP");
++  static_assert(CPU_ASIMD   == HWCAP_ASIMD,   "Flag CPU_ASIMD must follow Linux HWCAP");
++  static_assert(CPU_EVTSTRM == HWCAP_EVTSTRM, "Flag CPU_EVTSTRM must follow Linux HWCAP");
++  static_assert(CPU_AES     == HWCAP_AES,     "Flag CPU_AES must follow Linux HWCAP");
++  static_assert(CPU_PMULL   == HWCAP_PMULL,   "Flag CPU_PMULL must follow Linux HWCAP");
++  static_assert(CPU_SHA1    == HWCAP_SHA1,    "Flag CPU_SHA1 must follow Linux HWCAP");
++  static_assert(CPU_SHA2    == HWCAP_SHA2,    "Flag CPU_SHA2 must follow Linux HWCAP");
++  static_assert(CPU_CRC32   == HWCAP_CRC32,   "Flag CPU_CRC32 must follow Linux HWCAP");
++  static_assert(CPU_LSE     == HWCAP_ATOMICS, "Flag CPU_LSE must follow Linux HWCAP");
++  static_assert(CPU_DCPOP   == HWCAP_DCPOP,   "Flag CPU_DCPOP must follow Linux HWCAP");
++  static_assert(CPU_SHA3    == HWCAP_SHA3,    "Flag CPU_SHA3 must follow Linux HWCAP");
++  static_assert(CPU_SHA512  == HWCAP_SHA512,  "Flag CPU_SHA512 must follow Linux HWCAP");
++  static_assert(CPU_SVE     == HWCAP_SVE,     "Flag CPU_SVE must follow Linux HWCAP");
++  static_assert(CPU_PACA    == HWCAP_PACA,    "Flag CPU_PACA must follow Linux HWCAP");
++  _features = auxv & (
++      HWCAP_FP      |
++      HWCAP_ASIMD   |
++      HWCAP_EVTSTRM |
++      HWCAP_AES     |
++      HWCAP_PMULL   |
++      HWCAP_SHA1    |
++      HWCAP_SHA2    |
++      HWCAP_CRC32   |
++      HWCAP_ATOMICS |
++      HWCAP_DCPOP   |
++      HWCAP_SHA3    |
++      HWCAP_SHA512  |
++      HWCAP_SVE     |
++      HWCAP_PACA);
+ 
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_BASE) {
+-    auxv = auxv | HWCAP_AES;
+-  }
++  if (auxv2 & HWCAP2_SVE2) _features |= CPU_SVE2;
++  if (auxv2 & HWCAP2_SVEBITPERM) _features |= CPU_SVEBITPERM;
+ 
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_PMULL) {
+-    auxv = auxv | HWCAP_PMULL;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA1_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE) {
+-    auxv = auxv | HWCAP_SHA1;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA2_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA2_BASE) {
+-    auxv = auxv | HWCAP_SHA2;
+-  }
+-
+-  if (ID_AA64ISAR0_CRC32_VAL(id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE) {
+-    auxv = auxv | HWCAP_CRC32;
+-  }
+-
+-  if (ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_IMPL || \
+-      ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_HP ) {
+-    auxv = auxv | HWCAP_ASIMD;
+-  }
+-
+-  return auxv;
+-}
+-#endif
+-
+-void VM_Version::get_os_cpu_info() {
+-#ifdef __OpenBSD__
+-  // READ_SPECIALREG is not available from userland on OpenBSD.
+-  // Hardcode these values to the "lowest common denominator"
+-  _cpu = CPU_IMPL_ARM;
+-  _model = CPU_PART_CORTEX_A53;
+-  _variant = 0;
+-  _revision = 0;
+-  _features = HWCAP_ASIMD;
+-#elif defined(__FreeBSD__)
+-  struct cpu_desc cpu_desc[1];
+-  struct cpu_desc user_cpu_desc;
+-
+   uint32_t midr;
+   uint32_t impl_id;
+   uint32_t part_id;
+-  uint32_t cpu = 0;
+   size_t i;
+   const struct cpu_parts *cpu_partsp = nullptr;
+ 
+@@ -372,8 +443,7 @@ void VM_Version::get_os_cpu_info() {
+   for (i = 0; i < nitems(cpu_implementers); i++) {
+     if (impl_id == cpu_implementers[i].impl_id ||
+       cpu_implementers[i].impl_id == 0) {
+-      cpu_desc[cpu].cpu_impl = impl_id;
+-      cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
++      _cpu = impl_id;
+       cpu_partsp = cpu_implementers[i].cpu_parts;
+       break;
+     }
+@@ -381,36 +451,13 @@ void VM_Version::get_os_cpu_info() {
+   part_id = CPU_PART(midr);
+   for (i = 0; &cpu_partsp[i] != nullptr; i++) {
+     if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) {
+-      cpu_desc[cpu].cpu_part_num = part_id;
+-      cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
++      _model = part_id;
+       break;
+     }
+   }
+ 
+-  cpu_desc[cpu].cpu_revision = CPU_REV(midr);
+-  cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
+-
+-  _cpu = cpu_desc[cpu].cpu_impl;
+-  _variant = cpu_desc[cpu].cpu_variant;
+-  _model = cpu_desc[cpu].cpu_part_num;
+-  _revision = cpu_desc[cpu].cpu_revision;
+-
+-  uint64_t auxv = os_get_processor_features();
+-
+-  _features = auxv & (
+-      HWCAP_FP      |
+-      HWCAP_ASIMD   |
+-      HWCAP_EVTSTRM |
+-      HWCAP_AES     |
+-      HWCAP_PMULL   |
+-      HWCAP_SHA1    |
+-      HWCAP_SHA2    |
+-      HWCAP_CRC32   |
+-      HWCAP_ATOMICS |
+-      HWCAP_DCPOP   |
+-      HWCAP_SHA3    |
+-      HWCAP_SHA512  |
+-      HWCAP_SVE);
++  _variant = CPU_VAR(midr);
++  _revision = CPU_REV(midr);
+ #endif
+ 
+   uint64_t ctr_el0;
Index: 25/Makefile
===================================================================
RCS file: /cvs/ports/devel/jdk/25/Makefile,v
diff -u -p -u -r1.3 Makefile
--- 25/Makefile 13 Nov 2025 22:48:44 -0000      1.3
+++ 25/Makefile 13 Dec 2025 23:35:01 -0000
@@ -12,6 +12,7 @@ PACKAGE_VER=  ${BASE_VER}.${BUILD_VER}.${
 PKGNAME=       jdk-${PACKAGE_VER}
 PKGSTEM=       jdk-25
 EPOCH=         0
+REVISION=      0
 
 DIST_SUBDIR=   jdk
 DISTNAME=      jdk-${VERSION_STR}
Index: 25/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
===================================================================
RCS file: 25/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
diff -N 25/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ 25/patches/patch-src_hotspot_os_cpu_bsd_aarch64_vm_version_bsd_aarch64_cpp 13 Dec 2025 23:35:01 -0000
@@ -0,0 +1,468 @@
+Detect CPU, model, variant and revision.
+Get hardware capabilities using elf_aux_info(3).
+
+Index: src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+--- src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp.orig
++++ src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
+@@ -135,91 +135,70 @@ bool VM_Version::is_cpu_emulated() {
+ #else // __APPLE__
+ 
+ #include <machine/armreg.h>
+-#if defined (__FreeBSD__)
+-#include <machine/elf.h>
++#if defined (__FreeBSD__) || defined (__OpenBSD__)
++#include <sys/auxv.h>
+ #endif
+ 
+-#ifndef HWCAP_ASIMD
+-#define HWCAP_ASIMD (1<<1)
+-#endif
+-
+-#ifndef HWCAP_AES
+-#define HWCAP_AES   (1<<3)
+-#endif
+-
+-#ifndef HWCAP_PMULL
+-#define HWCAP_PMULL (1<<4)
+-#endif
+-
+-#ifndef HWCAP_SHA1
+-#define HWCAP_SHA1  (1<<5)
+-#endif
+-
+-#ifndef HWCAP_SHA2
+-#define HWCAP_SHA2  (1<<6)
+-#endif
+-
+-#ifndef HWCAP_CRC32
+-#define HWCAP_CRC32 (1<<7)
+-#endif
+-
+-#ifndef HWCAP_ATOMICS
+-#define HWCAP_ATOMICS (1<<8)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_SHIFT
+-#define ID_AA64PFR0_AdvSIMD_SHIFT 20
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD
+-#define ID_AA64PFR0_AdvSIMD(x) ((x) & (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT))
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_IMPL
+-#define ID_AA64PFR0_AdvSIMD_IMPL (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64PFR0_AdvSIMD_HP
+-#define ID_AA64PFR0_AdvSIMD_HP (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+-#endif
+-
+-#ifndef ID_AA64ISAR0_AES_VAL
+-#define ID_AA64ISAR0_AES_VAL ID_AA64ISAR0_AES
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA1_VAL
+-#define ID_AA64ISAR0_SHA1_VAL ID_AA64ISAR0_SHA1
+-#endif
+-
+-#ifndef ID_AA64ISAR0_SHA2_VAL
+-#define ID_AA64ISAR0_SHA2_VAL ID_AA64ISAR0_SHA2
+-#endif
+-
+-#ifndef ID_AA64ISAR0_CRC32_VAL
+-#define ID_AA64ISAR0_CRC32_VAL ID_AA64ISAR0_CRC32
+-#endif
+-
+ #define       CPU_IMPL_ARM            0x41
+ #define       CPU_IMPL_BROADCOM       0x42
+ #define       CPU_IMPL_CAVIUM         0x43
+ #define       CPU_IMPL_DEC            0x44
++#define       CPU_IMPL_FUJITSU        0x46
++#define       CPU_IMPL_HISILICON      0x48
+ #define       CPU_IMPL_INFINEON       0x49
+ #define       CPU_IMPL_FREESCALE      0x4D
+ #define       CPU_IMPL_NVIDIA         0x4E
+ #define       CPU_IMPL_APM            0x50
+ #define       CPU_IMPL_QUALCOMM       0x51
+ #define       CPU_IMPL_MARVELL        0x56
++#define       CPU_IMPL_APPLE          0x61
+ #define       CPU_IMPL_INTEL          0x69
++#define       CPU_IMPL_AMPERE         0xC0
++#define       CPU_IMPL_MICROSOFT      0x6D
+ 
+ /* ARM Part numbers */
+ #define       CPU_PART_FOUNDATION     0xD00
+-#define       CPU_PART_CORTEX_A35     0xD04
++#define       CPU_PART_CORTEX_A34     0xD02
+ #define       CPU_PART_CORTEX_A53     0xD03
++#define       CPU_PART_CORTEX_A35     0xD04
+ #define       CPU_PART_CORTEX_A55     0xD05
++#define       CPU_PART_CORTEX_A65     0xD06
+ #define       CPU_PART_CORTEX_A57     0xD07
+ #define       CPU_PART_CORTEX_A72     0xD08
+ #define       CPU_PART_CORTEX_A73     0xD09
+ #define       CPU_PART_CORTEX_A75     0xD0A
++#define       CPU_PART_CORTEX_A76     0xD0B
++#define       CPU_PART_NEOVERSE_N1    0xD0C
++#define       CPU_PART_CORTEX_A77     0xD0D
++#define       CPU_PART_CORTEX_A76AE   0xD0E
++#define       CPU_PART_AEM_V8         0xD0F
++#define       CPU_PART_NEOVERSE_V1    0xD40
++#define       CPU_PART_CORTEX_A78     0xD41
++#define       CPU_PART_CORTEX_A78AE   0xD42
++#define       CPU_PART_CORTEX_A65AE   0xD43
++#define       CPU_PART_CORTEX_X1      0xD44
++#define       CPU_PART_CORTEX_A510    0xD46
++#define       CPU_PART_CORTEX_A710    0xD47
++#define       CPU_PART_CORTEX_X2      0xD48
++#define       CPU_PART_NEOVERSE_N2    0xD49
++#define       CPU_PART_NEOVERSE_E1    0xD4A
++#define       CPU_PART_CORTEX_A78C    0xD4B
++#define       CPU_PART_CORTEX_X1C     0xD4C
++#define       CPU_PART_CORTEX_A715    0xD4D
++#define       CPU_PART_CORTEX_X3      0xD4E
++#define       CPU_PART_NEOVERSE_V2    0xD4F
++#define       CPU_PART_CORTEX_A520    0xD80
++#define       CPU_PART_CORTEX_A720    0xD81
++#define       CPU_PART_CORTEX_X4      0xD82
++#define       CPU_PART_NEOVERSE_V3AE  0xD83
++#define       CPU_PART_NEOVERSE_V3    0xD84
++#define       CPU_PART_CORTEX_X925    0xD85
++#define       CPU_PART_CORTEX_A725    0xD87
++#define       CPU_PART_C1_NANO        0xD8A
++#define       CPU_PART_C1_PRO         0xD8B
++#define       CPU_PART_C1_ULTRA       0xD8C
++#define       CPU_PART_NEOVERSE_N3    0xD8E
++#define       CPU_PART_C1_PREMIUM     0xD90
+ 
+ /* Cavium Part numbers */
+ #define       CPU_PART_THUNDERX       0x0A1
+@@ -232,21 +211,40 @@ bool VM_Version::is_cpu_emulated() {
+ 
+ #define       CPU_REV_THUNDERX2_0     0x00
+ 
++/* APM (now Ampere) Part number */
++#define CPU_PART_EMAG8180     0x000
++
++/* Ampere Part numbers */
++#define       CPU_PART_AMPERE1        0xAC3
++#define       CPU_PART_AMPERE1A       0xAC4
++
++/* Microsoft Part numbers */
++#define       CPU_PART_AZURE_COBALT_100       0xD49
++
++/* Qualcomm */
++#define       CPU_PART_KRYO400_GOLD   0x804
++#define       CPU_PART_KRYO400_SILVER 0x805
++
++/* Apple part numbers */
++#define CPU_PART_M1_ICESTORM      0x022
++#define CPU_PART_M1_FIRESTORM     0x023
++#define CPU_PART_M1_ICESTORM_PRO  0x024
++#define CPU_PART_M1_FIRESTORM_PRO 0x025
++#define CPU_PART_M1_ICESTORM_MAX  0x028
++#define CPU_PART_M1_FIRESTORM_MAX 0x029
++#define CPU_PART_M2_BLIZZARD      0x032
++#define CPU_PART_M2_AVALANCHE     0x033
++#define CPU_PART_M2_BLIZZARD_PRO  0x034
++#define CPU_PART_M2_AVALANCHE_PRO 0x035
++#define CPU_PART_M2_BLIZZARD_MAX  0x038
++#define CPU_PART_M2_AVALANCHE_MAX 0x039
++
+ #define       CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
+ #define       CPU_PART(midr)  (((midr) >> 4) & 0xfff)
+ #define       CPU_VAR(midr)   (((midr) >> 20) & 0xf)
+ #define       CPU_REV(midr)   (((midr) >> 0) & 0xf)
+ #define UL(x)   UINT64_C(x)
+ 
+-struct cpu_desc {
+-      u_int           cpu_impl;
+-      u_int           cpu_part_num;
+-      u_int           cpu_variant;
+-      u_int           cpu_revision;
+-      const char      *cpu_impl_name;
+-      const char      *cpu_part_name;
+-};
+-
+ struct cpu_parts {
+       u_int           part_id;
+       const char      *part_name;
+@@ -269,16 +267,51 @@ struct cpu_implementers {
+  */
+ /* ARM Ltd. */
+ static const struct cpu_parts cpu_parts_arm[] = {
++      { CPU_PART_AEM_V8, "AEMv8" },
+       { CPU_PART_FOUNDATION, "Foundation-Model" },
++      { CPU_PART_CORTEX_A34, "Cortex-A34" },
+       { CPU_PART_CORTEX_A35, "Cortex-A35" },
+       { CPU_PART_CORTEX_A53, "Cortex-A53" },
+       { CPU_PART_CORTEX_A55, "Cortex-A55" },
+       { CPU_PART_CORTEX_A57, "Cortex-A57" },
++      { CPU_PART_CORTEX_A65, "Cortex-A65" },
++      { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" },
+       { CPU_PART_CORTEX_A72, "Cortex-A72" },
+       { CPU_PART_CORTEX_A73, "Cortex-A73" },
+       { CPU_PART_CORTEX_A75, "Cortex-A75" },
++      { CPU_PART_CORTEX_A76, "Cortex-A76" },
++      { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
++      { CPU_PART_CORTEX_A77, "Cortex-A77" },
++      { CPU_PART_CORTEX_A78, "Cortex-A78" },
++      { CPU_PART_CORTEX_A78AE, "Cortex-A78AE" },
++      { CPU_PART_CORTEX_A78C, "Cortex-A78C" },
++      { CPU_PART_CORTEX_A510, "Cortex-A510" },
++      { CPU_PART_CORTEX_A520, "Cortex-A520" },
++      { CPU_PART_CORTEX_A710, "Cortex-A710" },
++      { CPU_PART_CORTEX_A715, "Cortex-A715" },
++      { CPU_PART_CORTEX_A720, "Cortex-A720" },
++      { CPU_PART_CORTEX_A725, "Cortex-A725" },
++      { CPU_PART_CORTEX_X925, "Cortex-X925" },
++      { CPU_PART_CORTEX_X1, "Cortex-X1" },
++      { CPU_PART_CORTEX_X1C, "Cortex-X1C" },
++      { CPU_PART_CORTEX_X2, "Cortex-X2" },
++      { CPU_PART_CORTEX_X3, "Cortex-X3" },
++      { CPU_PART_CORTEX_X4, "Cortex-X4" },
++      { CPU_PART_C1_NANO, "C1-Nano" },
++      { CPU_PART_C1_PRO, "C1-Pro" },
++      { CPU_PART_C1_PREMIUM, "C1-Premium" },
++      { CPU_PART_C1_ULTRA, "C1-Ultra" },
++      { CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
++      { CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
++      { CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
++      { CPU_PART_NEOVERSE_N3, "Neoverse-N3" },
++      { CPU_PART_NEOVERSE_V1, "Neoverse-V1" },
++      { CPU_PART_NEOVERSE_V2, "Neoverse-V2" },
++      { CPU_PART_NEOVERSE_V3, "Neoverse-V3" },
++      { CPU_PART_NEOVERSE_V3AE, "Neoverse-V3AE" },
+       CPU_PART_NONE,
+ };
++
+ /* Cavium */
+ static const struct cpu_parts cpu_parts_cavium[] = {
+       { CPU_PART_THUNDERX, "ThunderX" },
+@@ -286,6 +319,49 @@ static const struct cpu_parts cpu_parts_cavium[] = {
+       CPU_PART_NONE,
+ };
+ 
++/* APM (now Ampere) */
++static const struct cpu_parts cpu_parts_apm[] = {
++      { CPU_PART_EMAG8180, "eMAG 8180" },
++      CPU_PART_NONE,
++};
++
++/* Ampere */
++static const struct cpu_parts cpu_parts_ampere[] = {
++      { CPU_PART_AMPERE1, "AmpereOne AC03" },
++      { CPU_PART_AMPERE1A, "AmpereOne AC04" },
++      CPU_PART_NONE,
++};
++
++/* Microsoft */
++static const struct cpu_parts cpu_parts_microsoft[] = {
++      { CPU_PART_AZURE_COBALT_100, "Azure Cobalt 100" },
++      CPU_PART_NONE,
++};
++
++/* Qualcomm */
++static const struct cpu_parts cpu_parts_qcom[] = {
++      { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" },
++      { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" },
++      CPU_PART_NONE,
++};
++
++/* Apple */
++static const struct cpu_parts cpu_parts_apple[] = {
++      { CPU_PART_M1_ICESTORM, "M1 Icestorm" },
++      { CPU_PART_M1_FIRESTORM, "M1 Firestorm" },
++      { CPU_PART_M1_ICESTORM_PRO, "M1 Pro Icestorm" },
++      { CPU_PART_M1_FIRESTORM_PRO, "M1 Pro Firestorm" },
++      { CPU_PART_M1_ICESTORM_MAX, "M1 Max Icestorm" },
++      { CPU_PART_M1_FIRESTORM_MAX, "M1 Max Firestorm" },
++      { CPU_PART_M2_BLIZZARD, "M2 Blizzard" },
++      { CPU_PART_M2_AVALANCHE, "M2 Avalanche" },
++      { CPU_PART_M2_BLIZZARD_PRO, "M2 Pro Blizzard" },
++      { CPU_PART_M2_AVALANCHE_PRO, "M2 Pro Avalanche" },
++      { CPU_PART_M2_BLIZZARD_MAX, "M2 Max Blizzard" },
++      { CPU_PART_M2_AVALANCHE_MAX, "M2 Max Avalanche" },
++      CPU_PART_NONE,
++};
++
+ /* Unknown */
+ static const struct cpu_parts cpu_parts_none[] = {
+       CPU_PART_NONE,
+@@ -295,74 +371,73 @@ static const struct cpu_parts cpu_parts_none[] = {
+  * Implementers table.
+  */
+ const struct cpu_implementers cpu_implementers[] = {
++      { CPU_IMPL_AMPERE,      "Ampere",       cpu_parts_ampere },
++      { CPU_IMPL_APPLE,       "Apple",        cpu_parts_apple },
++      { CPU_IMPL_APM,         "APM",          cpu_parts_apm },
+       { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
+       { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
+       { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
+       { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
+-      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
+-      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
+-      { CPU_IMPL_APM,         "APM",          cpu_parts_none },
+-      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
+-      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_FUJITSU,     "Fujitsu",      cpu_parts_none },
++      { CPU_IMPL_HISILICON,   "HiSilicon",    cpu_parts_none },
++      { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
+       { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
++      { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
++      { CPU_IMPL_MICROSOFT,   "Microsoft",    cpu_parts_microsoft },
++      { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
++      { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_qcom },
+       CPU_IMPLEMENTER_NONE,
+ };
+ 
+-#ifdef __FreeBSD__
+-static unsigned long os_get_processor_features() {
++void VM_Version::get_os_cpu_info() {
++#if defined(__FreeBSD__) || defined(__OpenBSD__)
++
+   unsigned long auxv = 0;
+-  uint64_t id_aa64isar0, id_aa64pfr0;
++  unsigned long auxv2 = 0;
++  elf_aux_info(AT_HWCAP, &auxv, sizeof(auxv));
++  elf_aux_info(AT_HWCAP2, &auxv2, sizeof(auxv2));
+ 
+-  id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
+-  id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
++  static_assert(CPU_FP      == HWCAP_FP,      "Flag CPU_FP must follow HWCAP");
++  static_assert(CPU_ASIMD   == HWCAP_ASIMD,   "Flag CPU_ASIMD must follow HWCAP");
++  static_assert(CPU_EVTSTRM == HWCAP_EVTSTRM, "Flag CPU_EVTSTRM must follow HWCAP");
++  static_assert(CPU_AES     == HWCAP_AES,     "Flag CPU_AES must follow HWCAP");
++  static_assert(CPU_PMULL   == HWCAP_PMULL,   "Flag CPU_PMULL must follow HWCAP");
++  static_assert(CPU_SHA1    == HWCAP_SHA1,    "Flag CPU_SHA1 must follow HWCAP");
++  static_assert(CPU_SHA2    == HWCAP_SHA2,    "Flag CPU_SHA2 must follow HWCAP");
++  static_assert(CPU_CRC32   == HWCAP_CRC32,   "Flag CPU_CRC32 must follow HWCAP");
++  static_assert(CPU_LSE     == HWCAP_ATOMICS, "Flag CPU_LSE must follow HWCAP");
++  static_assert(CPU_DCPOP   == HWCAP_DCPOP,   "Flag CPU_DCPOP must follow HWCAP");
++  static_assert(CPU_SHA3    == HWCAP_SHA3,    "Flag CPU_SHA3 must follow HWCAP");
++  static_assert(CPU_SHA512  == HWCAP_SHA512,  "Flag CPU_SHA512 must follow HWCAP");
++  static_assert(CPU_SVE     == HWCAP_SVE,     "Flag CPU_SVE must follow HWCAP");
++  static_assert(CPU_PACA    == HWCAP_PACA,    "Flag CPU_PACA must follow HWCAP");
++  static_assert(CPU_FPHP    == HWCAP_FPHP,    "Flag CPU_FPHP must follow HWCAP");
++  static_assert(CPU_ASIMDHP == HWCAP_ASIMDHP, "Flag CPU_ASIMDHP must follow HWCAP");
++  _features = auxv & (
++      HWCAP_FP      |
++      HWCAP_ASIMD   |
++      HWCAP_EVTSTRM |
++      HWCAP_AES     |
++      HWCAP_PMULL   |
++      HWCAP_SHA1    |
++      HWCAP_SHA2    |
++      HWCAP_CRC32   |
++      HWCAP_ATOMICS |
++      HWCAP_DCPOP   |
++      HWCAP_SHA3    |
++      HWCAP_SHA512  |
++      HWCAP_SVE     |
++      HWCAP_PACA    |
++      HWCAP_FPHP    |
++      HWCAP_ASIMDHP);
+ 
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_BASE) {
+-    auxv = auxv | HWCAP_AES;
+-  }
++  if (auxv2 & HWCAP2_SVE2) _features |= CPU_SVE2;
++  if (auxv2 & HWCAP2_SVEBITPERM) _features |= CPU_SVEBITPERM;
+ 
+-  if (ID_AA64ISAR0_AES_VAL(id_aa64isar0) == ID_AA64ISAR0_AES_PMULL) {
+-    auxv = auxv | HWCAP_PMULL;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA1_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE) {
+-    auxv = auxv | HWCAP_SHA1;
+-  }
+-
+-  if (ID_AA64ISAR0_SHA2_VAL(id_aa64isar0) == ID_AA64ISAR0_SHA2_BASE) {
+-    auxv = auxv | HWCAP_SHA2;
+-  }
+-
+-  if (ID_AA64ISAR0_CRC32_VAL(id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE) {
+-    auxv = auxv | HWCAP_CRC32;
+-  }
+-
+-  if (ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_IMPL || \
+-      ID_AA64PFR0_AdvSIMD(id_aa64pfr0) == ID_AA64PFR0_AdvSIMD_HP ) {
+-    auxv = auxv | HWCAP_ASIMD;
+-  }
+-
+-  return auxv;
+-}
+-#endif
+-
+-void VM_Version::get_os_cpu_info() {
+-#ifdef __OpenBSD__
+-  // READ_SPECIALREG is not available from userland on OpenBSD.
+-  // Hardcode these values to the "lowest common denominator"
+-  _cpu = CPU_IMPL_ARM;
+-  _model = CPU_PART_CORTEX_A53;
+-  _variant = 0;
+-  _revision = 0;
+-  _features = HWCAP_ASIMD;
+-#elif defined(__FreeBSD__)
+-  struct cpu_desc cpu_desc[1];
+-  struct cpu_desc user_cpu_desc;
+-
+   uint32_t midr;
+   uint32_t impl_id;
+   uint32_t part_id;
+-  uint32_t cpu = 0;
+   size_t i;
+   const struct cpu_parts *cpu_partsp = nullptr;
+ 
+@@ -372,8 +447,7 @@ void VM_Version::get_os_cpu_info() {
+   for (i = 0; i < nitems(cpu_implementers); i++) {
+     if (impl_id == cpu_implementers[i].impl_id ||
+       cpu_implementers[i].impl_id == 0) {
+-      cpu_desc[cpu].cpu_impl = impl_id;
+-      cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
++      _cpu = impl_id;
+       cpu_partsp = cpu_implementers[i].cpu_parts;
+       break;
+     }
+@@ -381,36 +455,13 @@ void VM_Version::get_os_cpu_info() {
+   part_id = CPU_PART(midr);
+   for (i = 0; &cpu_partsp[i] != nullptr; i++) {
+     if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) {
+-      cpu_desc[cpu].cpu_part_num = part_id;
+-      cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
++      _model = part_id;
+       break;
+     }
+   }
+ 
+-  cpu_desc[cpu].cpu_revision = CPU_REV(midr);
+-  cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
+-
+-  _cpu = cpu_desc[cpu].cpu_impl;
+-  _variant = cpu_desc[cpu].cpu_variant;
+-  _model = cpu_desc[cpu].cpu_part_num;
+-  _revision = cpu_desc[cpu].cpu_revision;
+-
+-  uint64_t auxv = os_get_processor_features();
+-
+-  _features = auxv & (
+-      HWCAP_FP      |
+-      HWCAP_ASIMD   |
+-      HWCAP_EVTSTRM |
+-      HWCAP_AES     |
+-      HWCAP_PMULL   |
+-      HWCAP_SHA1    |
+-      HWCAP_SHA2    |
+-      HWCAP_CRC32   |
+-      HWCAP_ATOMICS |
+-      HWCAP_DCPOP   |
+-      HWCAP_SHA3    |
+-      HWCAP_SHA512  |
+-      HWCAP_SVE);
++  _variant = CPU_VAR(midr);
++  _revision = CPU_REV(midr);
+ #endif
+ 
+   uint64_t ctr_el0;
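
For anyone who wants to sanity-check the elf_aux_info(3) side without doing a
full jdk build, here is a minimal standalone sketch. It is not part of the
diff; it only illustrates the call the patched get_os_cpu_info() makes, and
the printing is my own addition:

#include <sys/auxv.h>
#include <stdio.h>

int
main(void)
{
	unsigned long hwcap = 0, hwcap2 = 0;

	/* elf_aux_info(3) returns 0 on success; on failure the values
	 * simply stay 0, which is also how the jdk code treats it. */
	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) != 0)
		hwcap = 0;
	if (elf_aux_info(AT_HWCAP2, &hwcap2, sizeof(hwcap2)) != 0)
		hwcap2 = 0;

	printf("AT_HWCAP  = 0x%lx\n", hwcap);
	printf("AT_HWCAP2 = 0x%lx\n", hwcap2);
	return 0;
}

Running that on the target machine and comparing the bits against the HWCAP_*
flags should show the same feature set the JVM ends up enabling.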
