Hi,
The attached patch implements the AArch64 SISD intrinsics vget_high and
vget_low.
--
Thanks,
-Jiangning
diff --git a/include/clang/Basic/arm_neon.td b/include/clang/Basic/arm_neon.td
index e2e3d29..63a7706 100644
--- a/include/clang/Basic/arm_neon.td
+++ b/include/clang/Basic/arm_neon.td
@@ -962,6 +962,13 @@ def SCALAR_SUB : SInst<"vsub", "sss", "SlSUl">;
// Scalar Saturating Sub
def SCALAR_QSUB : SInst<"vqsub", "sss", "ScSsSiSlSUcSUsSUiSUl">;
+let InstName = "vmov" in {
+def VGET_HIGH_A64 : NoTestOpInst<"vget_high", "dk", "csilhfdUcUsUiUlPcPsPl",
+ OP_HI>;
+def VGET_LOW_A64 : NoTestOpInst<"vget_low", "dk", "csilhfdUcUsUiUlPcPsPl",
+ OP_LO>;
+}
+
////////////////////////////////////////////////////////////////////////////////
// Scalar Shift
// Scalar Shift Left
diff --git a/test/CodeGen/aarch64-neon-vget-hilo.c b/test/CodeGen/aarch64-neon-vget-hilo.c
new file mode 100644
index 0000000..9edd31a
--- /dev/null
+++ b/test/CodeGen/aarch64-neon-vget-hilo.c
@@ -0,0 +1,176 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
+// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
+
+// Test new aarch64 intrinsics and types
+
+#include <arm_neon.h>
+
+int8x8_t test_vget_high_s8(int8x16_t a) {
+ // CHECK: test_vget_high_s8
+ return vget_high_s8(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+int16x4_t test_vget_high_s16(int16x8_t a) {
+ // CHECK: test_vget_high_s16
+ return vget_high_s16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+int32x2_t test_vget_high_s32(int32x4_t a) {
+ // CHECK: test_vget_high_s32
+ return vget_high_s32(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+int64x1_t test_vget_high_s64(int64x2_t a) {
+ // CHECK: test_vget_high_s64
+ return vget_high_s64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+uint8x8_t test_vget_high_u8(uint8x16_t a) {
+ // CHECK: test_vget_high_u8
+ return vget_high_u8(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+uint16x4_t test_vget_high_u16(uint16x8_t a) {
+ // CHECK: test_vget_high_u16
+ return vget_high_u16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+uint32x2_t test_vget_high_u32(uint32x4_t a) {
+ // CHECK: test_vget_high_u32
+ return vget_high_u32(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+uint64x1_t test_vget_high_u64(uint64x2_t a) {
+ // CHECK: test_vget_high_u64
+ return vget_high_u64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+poly64x1_t test_vget_high_p64(poly64x2_t a) {
+ // CHECK: test_vget_high_p64
+ return vget_high_p64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+float16x4_t test_vget_high_f16(float16x8_t a) {
+ // CHECK: test_vget_high_f16
+ return vget_high_f16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+float32x2_t test_vget_high_f32(float32x4_t a) {
+ // CHECK: test_vget_high_f32
+ return vget_high_f32(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+poly8x8_t test_vget_high_p8(poly8x16_t a) {
+ // CHECK: test_vget_high_p8
+ return vget_high_p8(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+poly16x4_t test_vget_high_p16(poly16x8_t a) {
+ // CHECK: test_vget_high_p16
+ return vget_high_p16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+float64x1_t test_vget_high_f64(float64x2_t a) {
+ // CHECK: test_vget_high_f64
+ return vget_high_f64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[1]
+}
+
+int8x8_t test_vget_low_s8(int8x16_t a) {
+ // CHECK: test_vget_low_s8
+ return vget_low_s8(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+int16x4_t test_vget_low_s16(int16x8_t a) {
+ // CHECK: test_vget_low_s16
+ return vget_low_s16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+int32x2_t test_vget_low_s32(int32x4_t a) {
+ // CHECK: test_vget_low_s32
+ return vget_low_s32(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+int64x1_t test_vget_low_s64(int64x2_t a) {
+ // CHECK: test_vget_low_s64
+ return vget_low_s64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+uint8x8_t test_vget_low_u8(uint8x16_t a) {
+ // CHECK: test_vget_low_u8
+ return vget_low_u8(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+uint16x4_t test_vget_low_u16(uint16x8_t a) {
+ // CHECK: test_vget_low_u16
+ return vget_low_u16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+uint32x2_t test_vget_low_u32(uint32x4_t a) {
+ // CHECK: test_vget_low_u32
+ return vget_low_u32(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+uint64x1_t test_vget_low_u64(uint64x2_t a) {
+ // CHECK: test_vget_low_u64
+ return vget_low_u64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+poly64x1_t test_vget_low_p64(poly64x2_t a) {
+ // CHECK: test_vget_low_p64
+ return vget_low_p64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+float16x4_t test_vget_low_f16(float16x8_t a) {
+ // CHECK: test_vget_low_f16
+ return vget_low_f16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+float32x2_t test_vget_low_f32(float32x4_t a) {
+ // CHECK: test_vget_low_f32
+ return vget_low_f32(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+poly8x8_t test_vget_low_p8(poly8x16_t a) {
+ // CHECK: test_vget_low_p8
+ return vget_low_p8(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+poly16x4_t test_vget_low_p16(poly16x8_t a) {
+ // CHECK: test_vget_low_p16
+ return vget_low_p16(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
+float64x1_t test_vget_low_f64(float64x2_t a) {
+ // CHECK: test_vget_low_f64
+ return vget_low_f64(a);
+ // CHECK: dup d0, {{v[0-9]+}}.d[0]
+}
+
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 09d2876..a6ffbf9 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -1590,16 +1590,31 @@ def Neon_High8H : PatFrag<(ops node:$in),
(extract_subvector (v8i16 node:$in), (iPTR 4))>;
def Neon_High4S : PatFrag<(ops node:$in),
(extract_subvector (v4i32 node:$in), (iPTR 2))>;
-
+def Neon_High2D : PatFrag<(ops node:$in),
+ (extract_subvector (v2i64 node:$in), (iPTR 1))>;
+def Neon_High4f : PatFrag<(ops node:$in),
+ (extract_subvector (v4f32 node:$in), (iPTR 2))>;
+def Neon_High2d : PatFrag<(ops node:$in),
+ (extract_subvector (v2f64 node:$in), (iPTR 1))>;
+
+def Neon_low16B : PatFrag<(ops node:$in),
+ (v8i8 (extract_subvector (v16i8 node:$in),
+ (iPTR 0)))>;
def Neon_low8H : PatFrag<(ops node:$in),
(v4i16 (extract_subvector (v8i16 node:$in),
(iPTR 0)))>;
def Neon_low4S : PatFrag<(ops node:$in),
(v2i32 (extract_subvector (v4i32 node:$in),
(iPTR 0)))>;
+def Neon_low2D : PatFrag<(ops node:$in),
+ (v1i64 (extract_subvector (v2i64 node:$in),
+ (iPTR 0)))>;
def Neon_low4f : PatFrag<(ops node:$in),
(v2f32 (extract_subvector (v4f32 node:$in),
(iPTR 0)))>;
+def Neon_low2d : PatFrag<(ops node:$in),
+ (v1f64 (extract_subvector (v2f64 node:$in),
+ (iPTR 0)))>;
def neon_uimm3_shift : Operand<i32>,
ImmLeaf<i32, [{return Imm < 8;}]> {
@@ -5126,9 +5141,8 @@ def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
ValueType OpTy, Operand OpImm,
ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
-
- def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
- (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
+ def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
+ (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
(ResTy (DUPI
@@ -5136,6 +5150,21 @@ multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
OpNImm:$Imm))>;
}
+multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh,
+ ValueType ResTy, ValueType OpTy> {
+ def : Pat<(ResTy (GetLow VPR128:$Rn)),
+ (ResTy (DUPdv_D (OpTy VPR128:$Rn), 0))>;
+ def : Pat<(ResTy (GetHigh VPR128:$Rn)),
+ (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
+}
+
+defm : NeonI_SDUP<Neon_low16B, Neon_High16B, v8i8, v16i8>;
+defm : NeonI_SDUP<Neon_low8H, Neon_High8H, v4i16, v8i16>;
+defm : NeonI_SDUP<Neon_low4S, Neon_High4S, v2i32, v4i32>;
+defm : NeonI_SDUP<Neon_low2D, Neon_High2D, v1i64, v2i64>;
+defm : NeonI_SDUP<Neon_low4f, Neon_High4f, v2f32, v4f32>;
+defm : NeonI_SDUP<Neon_low2d, Neon_High2d, v1f64, v2f64>;
+
// Patterns for vector extract of FP data using scalar DUP instructions
defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
diff --git a/test/CodeGen/AArch64/neon-simd-vget.ll b/test/CodeGen/AArch64/neon-simd-vget.ll
new file mode 100644
index 0000000..f389703
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-vget.ll
@@ -0,0 +1,225 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vget_high_s8(<16 x i8> %a) {
+; CHECK: test_vget_high_s8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_s16(<8 x i16> %a) {
+; CHECK: test_vget_high_s16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_s32(<4 x i32> %a) {
+; CHECK: test_vget_high_s32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_s64(<2 x i64> %a) {
+; CHECK: test_vget_high_s64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_u8(<16 x i8> %a) {
+; CHECK: test_vget_high_u8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_u16(<8 x i16> %a) {
+; CHECK: test_vget_high_u16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_u32(<4 x i32> %a) {
+; CHECK: test_vget_high_u32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_u64(<2 x i64> %a) {
+; CHECK: test_vget_high_u64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_p64(<2 x i64> %a) {
+; CHECK: test_vget_high_p64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_f16(<8 x i16> %a) {
+; CHECK: test_vget_high_f16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_high_f32(<4 x float> %a) {
+; CHECK: test_vget_high_f32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_p8(<16 x i8> %a) {
+; CHECK: test_vget_high_p8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_p16(<8 x i16> %a) {
+; CHECK: test_vget_high_p16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_high_f64(<2 x double> %a) {
+; CHECK: test_vget_high_f64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> <i32 1>
+ ret <1 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_s8(<16 x i8> %a) {
+; CHECK: test_vget_low_s8:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_s16(<8 x i16> %a) {
+; CHECK: test_vget_low_s16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_s32(<4 x i32> %a) {
+; CHECK: test_vget_low_s32:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_s64(<2 x i64> %a) {
+; CHECK: test_vget_low_s64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_u8(<16 x i8> %a) {
+; CHECK: test_vget_low_u8:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_u16(<8 x i16> %a) {
+; CHECK: test_vget_low_u16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_u32(<4 x i32> %a) {
+; CHECK: test_vget_low_u32:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_u64(<2 x i64> %a) {
+; CHECK: test_vget_low_u64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_p64(<2 x i64> %a) {
+; CHECK: test_vget_low_p64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_f16(<8 x i16> %a) {
+; CHECK: test_vget_low_f16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_low_f32(<4 x float> %a) {
+; CHECK: test_vget_low_f32:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_p8(<16 x i8> %a) {
+; CHECK: test_vget_low_p8:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_p16(<8 x i16> %a) {
+; CHECK: test_vget_low_p16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_low_f64(<2 x double> %a) {
+; CHECK: test_vget_low_f64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> zeroinitializer
+ ret <1 x double> %shuffle.i
+}
_______________________________________________
cfe-commits mailing list
[email protected]
http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits