https://github.com/Ayush3941 created 
https://github.com/llvm/llvm-project/pull/186119

This patch upstreams handling of AArch64 Neon extract-lane builtins in ClangIR, 
covering the vget_lane_* and vgetq_lane_* variants.

It also migrates the corresponding AArch64 Neon CodeGen tests into the neon 
test directory to provide CIR coverage.

>From 2745d339bc2eec3d3dc9a548d2cf70c49cd9215f Mon Sep 17 00:00:00 2001
From: Ayush3941 <[email protected]>
Date: Thu, 12 Mar 2026 09:31:43 -0400
Subject: [PATCH 1/2] [CIR][AArch64] Upstream Neon vget_lane/vgetq_lane
 builtins

---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |  82 +++++
 clang/test/CodeGen/AArch64/neon/neon-vget.c   | 302 ++++++++++++++++++
 2 files changed, 384 insertions(+)
 create mode 100644 clang/test/CodeGen/AArch64/neon/neon-vget.c

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5534e69b5f8bc..107271893844d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2724,33 +2724,115 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vset_lane_mf8:
   case NEON::BI__builtin_neon_vsetq_lane_mf8:
   case NEON::BI__builtin_neon_vsetq_lane_f64:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
+
   case NEON::BI__builtin_neon_vget_lane_i8:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt8Ty, 8));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdupb_lane_i8:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
+
   case NEON::BI__builtin_neon_vgetq_lane_i8:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt8Ty, 16));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdupb_laneq_i8:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vget_lane_mf8:
   case NEON::BI__builtin_neon_vdupb_lane_mf8:
   case NEON::BI__builtin_neon_vgetq_lane_mf8:
   case NEON::BI__builtin_neon_vdupb_laneq_mf8:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vget_lane_i16:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt16Ty, 4));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vduph_lane_i16:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vgetq_lane_i16:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt16Ty, 8));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vduph_laneq_i16:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vget_lane_i32:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt32Ty, 2));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdups_lane_i32:
   case NEON::BI__builtin_neon_vdups_lane_f32:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vgetq_lane_i32:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt32Ty, 4));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdups_laneq_i32:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vget_lane_i64:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt64Ty, 1));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdupd_lane_i64:
   case NEON::BI__builtin_neon_vdupd_lane_f64:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vgetq_lane_i64:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(uInt64Ty, 2));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdupd_laneq_i64:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vget_lane_f32:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(floatTy, 2));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vget_lane_f64:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(doubleTy, 1));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_f32:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(floatTy, 4));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdups_laneq_f32:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vgetq_lane_f64:
+    ops[0] = builder.createBitcast(ops[0], cir::VectorType::get(doubleTy, 2));
+    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
+                                   emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdupd_laneq_f64:
   case NEON::BI__builtin_neon_vaddh_f16:
   case NEON::BI__builtin_neon_vsubh_f16:
diff --git a/clang/test/CodeGen/AArch64/neon/neon-vget.c 
b/clang/test/CodeGen/AArch64/neon/neon-vget.c
new file mode 100644
index 0000000000000..297a07ced2f41
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/neon/neon-vget.c
@@ -0,0 +1,302 @@
+// REQUIRES: aarch64-registered-target || arm-registered-target
+
+// RUN:                   %clang_cc1 -triple arm64-none-linux-gnu 
-target-feature +neon -disable-O0-optnone -flax-vector-conversions=none         
  -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s 
--check-prefixes=LLVM
+// RUN: %if cir-enabled %{%clang_cc1 -triple arm64-none-linux-gnu 
-target-feature +neon -disable-O0-optnone -flax-vector-conversions=none 
-fclangir -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s 
--check-prefixes=LLVM %}
+// RUN: %if cir-enabled %{%clang_cc1 -triple arm64-none-linux-gnu 
-target-feature +neon -disable-O0-optnone -flax-vector-conversions=none 
-fclangir -emit-cir  -o - %s |                               FileCheck %s 
--check-prefixes=CIR %}
+
+//=============================================================================
+// NOTES
+//
+// This file contains tests that were originally located in
+//  * clang/test/CodeGen/AArch64/neon-vget.c
+//  * clang/test/CodeGen/AArch64/poly64.c
+// The main difference is the use of RUN lines that enable ClangIR lowering;
+// therefore only builtins currently supported by ClangIR are tested here.
+//
+// The half-precision extract variants are intentionally omitted here because
+// they lower through the vduph_* builtin IDs, which are still unsupported in
+// CIR for this patch.
+//
+//=============================================================================
+
+#include <arm_neon.h>
+
+//===------------------------------------------------------===//
+// Extract one element from vector
+//===------------------------------------------------------===//
+
+// LLVM-LABEL: @test_vget_lane_u8(
+// CIR-LABEL: @test_vget_lane_u8(
+uint8_t test_vget_lane_u8(uint8x8_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<8 x !u8i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <8 x i8> %{{.*}}, i32 7
+// LLVM: ret i8 [[VGET_LANE]]
+  return vget_lane_u8(a, 7);
+}
+
+// LLVM-LABEL: @test_vget_lane_u16(
+// CIR-LABEL: @test_vget_lane_u16(
+uint16_t test_vget_lane_u16(uint16x4_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<4 x !u16i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<4 x !u16i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <4 x i16> %{{.*}}, i32 3
+// LLVM: ret i16 [[VGET_LANE]]
+  return vget_lane_u16(a, 3);
+}
+
+// LLVM-LABEL: @test_vget_lane_u32(
+// CIR-LABEL: @test_vget_lane_u32(
+uint32_t test_vget_lane_u32(uint32x2_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<2 x !u32i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<2 x !u32i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <2 x i32> %{{.*}}, i32 1
+// LLVM: ret i32 [[VGET_LANE]]
+  return vget_lane_u32(a, 1);
+}
+
+// LLVM-LABEL: @test_vget_lane_s8(
+// CIR-LABEL: @test_vget_lane_s8(
+int8_t test_vget_lane_s8(int8x8_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<8 x !u8i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <8 x i8> %{{.*}}, i32 7
+// LLVM: ret i8 [[VGET_LANE]]
+  return vget_lane_s8(a, 7);
+}
+
+// LLVM-LABEL: @test_vget_lane_s16(
+// CIR-LABEL: @test_vget_lane_s16(
+int16_t test_vget_lane_s16(int16x4_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<4 x !u16i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<4 x !u16i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <4 x i16> %{{.*}}, i32 3
+// LLVM: ret i16 [[VGET_LANE]]
+  return vget_lane_s16(a, 3);
+}
+
+// LLVM-LABEL: @test_vget_lane_s32(
+// CIR-LABEL: @test_vget_lane_s32(
+int32_t test_vget_lane_s32(int32x2_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<2 x !u32i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<2 x !u32i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <2 x i32> %{{.*}}, i32 1
+// LLVM: ret i32 [[VGET_LANE]]
+  return vget_lane_s32(a, 1);
+}
+
+// LLVM-LABEL: @test_vget_lane_p8(
+// CIR-LABEL: @test_vget_lane_p8(
+poly8_t test_vget_lane_p8(poly8x8_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<8 x !u8i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <8 x i8> %{{.*}}, i32 7
+// LLVM: ret i8 [[VGET_LANE]]
+  return vget_lane_p8(a, 7);
+}
+
+// LLVM-LABEL: @test_vget_lane_p16(
+// CIR-LABEL: @test_vget_lane_p16(
+poly16_t test_vget_lane_p16(poly16x4_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<4 x !u16i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<4 x !u16i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <4 x i16> %{{.*}}, i32 3
+// LLVM: ret i16 [[VGET_LANE]]
+  return vget_lane_p16(a, 3);
+}
+
+// LLVM-LABEL: @test_vget_lane_f32(
+// CIR-LABEL: @test_vget_lane_f32(
+float32_t test_vget_lane_f32(float32x2_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<2 x !cir.float>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <2 x float> %{{.*}}, i32 1
+// LLVM: ret float [[VGET_LANE]]
+  return vget_lane_f32(a, 1);
+}
+
+// LLVM-LABEL: @test_vget_lane_f64(
+// CIR-LABEL: @test_vget_lane_f64(
+float64_t test_vget_lane_f64(float64x1_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<1 x 
!cir.double>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <1 x double> %{{.*}}, i32 0
+// LLVM: ret double [[VGET_LANE]]
+  return vget_lane_f64(a, 0);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_u8(
+// CIR-LABEL: @test_vgetq_lane_u8(
+uint8_t test_vgetq_lane_u8(uint8x16_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<16 x !u8i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %{{.*}}, i32 15
+// LLVM: ret i8 [[VGETQ_LANE]]
+  return vgetq_lane_u8(a, 15);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_u16(
+// CIR-LABEL: @test_vgetq_lane_u16(
+uint16_t test_vgetq_lane_u16(uint16x8_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<8 x !u16i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<8 x !u16i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %{{.*}}, i32 7
+// LLVM: ret i16 [[VGETQ_LANE]]
+  return vgetq_lane_u16(a, 7);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_u32(
+// CIR-LABEL: @test_vgetq_lane_u32(
+uint32_t test_vgetq_lane_u32(uint32x4_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<4 x !u32i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<4 x !u32i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %{{.*}}, i32 3
+// LLVM: ret i32 [[VGETQ_LANE]]
+  return vgetq_lane_u32(a, 3);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_s8(
+// CIR-LABEL: @test_vgetq_lane_s8(
+int8_t test_vgetq_lane_s8(int8x16_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<16 x !u8i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %{{.*}}, i32 15
+// LLVM: ret i8 [[VGETQ_LANE]]
+  return vgetq_lane_s8(a, 15);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_s16(
+// CIR-LABEL: @test_vgetq_lane_s16(
+int16_t test_vgetq_lane_s16(int16x8_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<8 x !u16i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<8 x !u16i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %{{.*}}, i32 7
+// LLVM: ret i16 [[VGETQ_LANE]]
+  return vgetq_lane_s16(a, 7);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_s32(
+// CIR-LABEL: @test_vgetq_lane_s32(
+int32_t test_vgetq_lane_s32(int32x4_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<4 x !u32i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<4 x !u32i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %{{.*}}, i32 3
+// LLVM: ret i32 [[VGETQ_LANE]]
+  return vgetq_lane_s32(a, 3);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_p8(
+// CIR-LABEL: @test_vgetq_lane_p8(
+poly8_t test_vgetq_lane_p8(poly8x16_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<16 x !u8i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %{{.*}}, i32 15
+// LLVM: ret i8 [[VGETQ_LANE]]
+  return vgetq_lane_p8(a, 15);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_p16(
+// CIR-LABEL: @test_vgetq_lane_p16(
+poly16_t test_vgetq_lane_p16(poly16x8_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<8 x !u16i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<8 x !u16i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %{{.*}}, i32 7
+// LLVM: ret i16 [[VGETQ_LANE]]
+  return vgetq_lane_p16(a, 7);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_f32(
+// CIR-LABEL: @test_vgetq_lane_f32(
+float32_t test_vgetq_lane_f32(float32x4_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<4 x !cir.float>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %{{.*}}, i32 3
+// LLVM: ret float [[VGETQ_LANE]]
+  return vgetq_lane_f32(a, 3);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_f64(
+// CIR-LABEL: @test_vgetq_lane_f64(
+float64_t test_vgetq_lane_f64(float64x2_t a) {
+// CIR: cir.vec.extract %{{.*}}[%{{.*}} : {{.*}}] : !cir.vector<2 x 
!cir.double>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %{{.*}}, i32 1
+// LLVM: ret double [[VGETQ_LANE]]
+  return vgetq_lane_f64(a, 1);
+}
+
+// LLVM-LABEL: @test_vget_lane_s64(
+// CIR-LABEL: @test_vget_lane_s64(
+int64_t test_vget_lane_s64(int64x1_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<1 x !u64i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<1 x !u64i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <1 x i64> %{{.*}}, i32 0
+// LLVM: ret i64 [[VGET_LANE]]
+  return vget_lane_s64(a, 0);
+}
+
+// LLVM-LABEL: @test_vget_lane_u64(
+// CIR-LABEL: @test_vget_lane_u64(
+uint64_t test_vget_lane_u64(uint64x1_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<1 x !u64i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<1 x !u64i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <1 x i64> %{{.*}}, i32 0
+// LLVM: ret i64 [[VGET_LANE]]
+  return vget_lane_u64(a, 0);
+}
+
+// LLVM-LABEL: @test_vget_lane_p64(
+// CIR-LABEL: @test_vget_lane_p64(
+poly64_t test_vget_lane_p64(poly64x1_t v) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<1 x !u64i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<1 x !u64i>
+
+// LLVM: [[VGET_LANE:%.*]] = extractelement <1 x i64> %{{.*}}, i32 0
+// LLVM: ret i64 [[VGET_LANE]]
+  return vget_lane_p64(v, 0);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_s64(
+// CIR-LABEL: @test_vgetq_lane_s64(
+int64_t test_vgetq_lane_s64(int64x2_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<2 x !u64i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<2 x !u64i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %{{.*}}, i32 1
+// LLVM: ret i64 [[VGETQ_LANE]]
+  return vgetq_lane_s64(a, 1);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_u64(
+// CIR-LABEL: @test_vgetq_lane_u64(
+uint64_t test_vgetq_lane_u64(uint64x2_t a) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<2 x !u64i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<2 x !u64i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %{{.*}}, i32 1
+// LLVM: ret i64 [[VGETQ_LANE]]
+  return vgetq_lane_u64(a, 1);
+}
+
+// LLVM-LABEL: @test_vgetq_lane_p64(
+// CIR-LABEL: @test_vgetq_lane_p64(
+poly64_t test_vgetq_lane_p64(poly64x2_t v) {
+// CIR: [[V:%.*]] = cir.cast bitcast %{{.*}} : {{.*}} -> !cir.vector<2 x !u64i>
+// CIR: cir.vec.extract [[V]][%{{.*}} : {{.*}}] : !cir.vector<2 x !u64i>
+
+// LLVM: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %{{.*}}, i32 1
+// LLVM: ret i64 [[VGETQ_LANE]]
+  return vgetq_lane_p64(v, 1);
+}

>From 42e2e3e20b7921fb7d79c6fea2522314e0ffd3d7 Mon Sep 17 00:00:00 2001
From: Ayush3941 <[email protected]>
Date: Thu, 12 Mar 2026 09:45:55 -0400
Subject: [PATCH 2/2] [CIR][AArch64] Upstream Neon vget_lane/vgetq_lane
 builtins v2

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 107271893844d..275bf56411d87 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2744,10 +2744,6 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
     return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()), 
ops[0],
                                    emitScalarExpr(expr->getArg(1)));
   case NEON::BI__builtin_neon_vdupb_laneq_i8:
-    cgm.errorNYI(expr->getSourceRange(),
-                 std::string("unimplemented AArch64 builtin call: ") +
-                     getContext().BuiltinInfo.getName(builtinID));
-    return mlir::Value{};
   case NEON::BI__builtin_neon_vget_lane_mf8:
   case NEON::BI__builtin_neon_vdupb_lane_mf8:
   case NEON::BI__builtin_neon_vgetq_lane_mf8:

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to