https://github.com/kcloudy0717 updated 
https://github.com/llvm/llvm-project/pull/188488

>From d22c872ec275bcbb2f39aec77e0333d0df79cf77 Mon Sep 17 00:00:00 2001
From: Kai Huang <[email protected]>
Date: Wed, 25 Mar 2026 17:11:57 +0800
Subject: [PATCH 1/2] [CodeGen][HLSL] Improve QuadReadAcrossX CodeGen test

---
 .../CodeGenHLSL/builtins/QuadReadAcrossX.hlsl | 215 ++++++++++++++----
 .../BuiltIns/QuadReadAcrossX-errors.hlsl      |   2 +-
 2 files changed, 171 insertions(+), 46 deletions(-)

diff --git a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl 
b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
index e0f14cd8be9b5..2c9519bda777d 100644
--- a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
@@ -1,46 +1,171 @@
-// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \
-// RUN:   dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes -o 
- | \
-// RUN:   FileCheck %s --check-prefixes=CHECK,CHECK-DXIL
-// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \
-// RUN:   spirv-pc-vulkan-compute %s -emit-llvm -disable-llvm-passes -o - | \
-// RUN:   FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV
-
-// Test basic lowering to runtime function call.
-
-// CHECK-LABEL: test_int
-int test_int(int expr) {
-  // CHECK-SPIRV:  %[[RET:.*]] = call spir_func [[TY:.*]] 
@llvm.spv.quad.read.across.x.i32([[TY]] %[[#]])
-  // CHECK-DXIL:  %[[RET:.*]] = call [[TY:.*]] 
@llvm.dx.quad.read.across.x.i32([[TY]] %[[#]])
-  // CHECK:  ret [[TY]] %[[RET]]
-  return QuadReadAcrossX(expr);
-}
-
-// CHECK-DXIL: declare [[TY]] @llvm.dx.quad.read.across.x.i32([[TY]]) 
#[[#attr:]]
-// CHECK-SPIRV: declare [[TY]] @llvm.spv.quad.read.across.x.i32([[TY]]) 
#[[#attr:]]
-
-// CHECK-LABEL: test_uint64_t
-uint64_t test_uint64_t(uint64_t expr) {
-  // CHECK-SPIRV:  %[[RET:.*]] = call spir_func [[TY:.*]] 
@llvm.spv.quad.read.across.x.i64([[TY]] %[[#]])
-  // CHECK-DXIL:  %[[RET:.*]] = call [[TY:.*]] 
@llvm.dx.quad.read.across.x.i64([[TY]] %[[#]])
-  // CHECK:  ret [[TY]] %[[RET]]
-  return QuadReadAcrossX(expr);
-}
-
-// CHECK-DXIL: declare [[TY]] @llvm.dx.quad.read.across.x.i64([[TY]]) 
#[[#attr:]]
-// CHECK-SPIRV: declare [[TY]] @llvm.spv.quad.read.across.x.i64([[TY]]) 
#[[#attr:]]
-
-// Test basic lowering to runtime function call with array and float value.
-
-// CHECK-LABEL: test_floatv4
-float4 test_floatv4(float4 expr) {
-  // CHECK-SPIRV:  %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn 
spir_func [[TY1:.*]] @llvm.spv.quad.read.across.x.v4f32([[TY1]] %[[#]]
-  // CHECK-DXIL:  %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn 
[[TY1:.*]] @llvm.dx.quad.read.across.x.v4f32([[TY1]] %[[#]])
-  // CHECK:  ret [[TY1]] %[[RET1]]
-  return QuadReadAcrossX(expr);
-}
-
-// CHECK-DXIL: declare [[TY1]] @llvm.dx.quad.read.across.x.v4f32([[TY1]]) 
#[[#attr]]
-// CHECK-SPIRV: declare [[TY1]] @llvm.spv.quad.read.across.x.v4f32([[TY1]]) 
#[[#attr]]
-
-// CHECK: attributes #[[#attr]] = {{{.*}} convergent {{.*}}}
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN:   dxil-pc-shadermodel6.3-compute %s -fnative-half-type 
-fnative-int16-type \
+// RUN:   -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN:   --check-prefixes=CHECK,CHECK-DXIL,CHECK-NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN:   dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes \
+// RUN:   -o - | FileCheck %s --check-prefixes=CHECK,CHECK-DXIL,CHECK-NO_HALF
 
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN:   spirv-unknown-vulkan-compute %s -fnative-half-type 
-fnative-int16-type \
+// RUN:   -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN:   --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN:   spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
+// RUN:   -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NO_HALF
+
+// Capture the expected interchange format so not every check needs to be 
duplicated
+// CHECK-DXIL: %[[RET:.*]] = call [[CC:]]i32 
@llvm.[[ICF:dx]].quad.read.across.x.i32(i32 %[[#]])
+// CHECK-SPIRV: %[[RET:.*]] = call [[CC:spir_func ]]i32 
@llvm.[[ICF:spv]].quad.read.across.x.i32(i32 %[[#]])
+// CHECK: ret i32 %[[RET]]
+int test_int(int expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> 
@llvm.[[ICF]].quad.read.across.x.v2i32(<2 x i32> %[[#]])
+// CHECK: ret <2 x i32> %[[RET]]
+int2 test_int2(int2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> 
@llvm.[[ICF]].quad.read.across.x.v3i32(<3 x i32> %[[#]])
+// CHECK: ret <3 x i32> %[[RET]]
+int3 test_int3(int3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> 
@llvm.[[ICF]].quad.read.across.x.v4i32(<4 x i32> %[[#]])
+// CHECK: ret <4 x i32> %[[RET]]
+int4 test_int4(int4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]i32 
@llvm.[[ICF]].quad.read.across.x.i32(i32 %[[#]])
+// CHECK: ret i32 %[[RET]]
+uint test_uint(uint expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> 
@llvm.[[ICF]].quad.read.across.x.v2i32(<2 x i32> %[[#]])
+// CHECK: ret <2 x i32> %[[RET]]
+uint2 test_uint2(uint2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> 
@llvm.[[ICF]].quad.read.across.x.v3i32(<3 x i32> %[[#]])
+// CHECK: ret <3 x i32> %[[RET]]
+uint3 test_uint3(uint3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> 
@llvm.[[ICF]].quad.read.across.x.v4i32(<4 x i32> %[[#]])
+// CHECK: ret <4 x i32> %[[RET]]
+uint4 test_uint4(uint4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]i64 
@llvm.[[ICF]].quad.read.across.x.i64(i64 %[[#]])
+// CHECK: ret i64 %[[RET]]
+int64_t test_int64_t(int64_t expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> 
@llvm.[[ICF]].quad.read.across.x.v2i64(<2 x i64> %[[#]])
+// CHECK: ret <2 x i64> %[[RET]]
+int64_t2 test_int64_t2(int64_t2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> 
@llvm.[[ICF]].quad.read.across.x.v3i64(<3 x i64> %[[#]])
+// CHECK: ret <3 x i64> %[[RET]]
+int64_t3 test_int64_t3(int64_t3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> 
@llvm.[[ICF]].quad.read.across.x.v4i64(<4 x i64> %[[#]])
+// CHECK: ret <4 x i64> %[[RET]]
+int64_t4 test_int64_t4(int64_t4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]i64 
@llvm.[[ICF]].quad.read.across.x.i64(i64 %[[#]])
+// CHECK: ret i64 %[[RET]]
+uint64_t test_uint64_t(uint64_t expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> 
@llvm.[[ICF]].quad.read.across.x.v2i64(<2 x i64> %[[#]])
+// CHECK: ret <2 x i64> %[[RET]]
+uint64_t2 test_uint64_t2(uint64_t2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> 
@llvm.[[ICF]].quad.read.across.x.v3i64(<3 x i64> %[[#]])
+// CHECK: ret <3 x i64> %[[RET]]
+uint64_t3 test_uint64_t3(uint64_t3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> 
@llvm.[[ICF]].quad.read.across.x.v4i64(<4 x i64> %[[#]])
+// CHECK: ret <4 x i64> %[[RET]]
+uint64_t4 test_uint64_t4(uint64_t4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float 
@llvm.[[ICF]].quad.read.across.x.f32(float %[[#]])
+// CHECK: ret float %[[RET]]
+float test_float(float expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> 
@llvm.[[ICF]].quad.read.across.x.v2f32(<2 x float> %[[#]])
+// CHECK: ret <2 x float> %[[RET]]
+float2 test_float2(float2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> 
@llvm.[[ICF]].quad.read.across.x.v3f32(<3 x float> %[[#]])
+// CHECK: ret <3 x float> %[[RET]]
+float3 test_float3(float3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> 
@llvm.[[ICF]].quad.read.across.x.v4f32(<4 x float> %[[#]])
+// CHECK: ret <4 x float> %[[RET]]
+float4 test_float4(float4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]double 
@llvm.[[ICF]].quad.read.across.x.f64(double %[[#]])
+// CHECK: ret double %[[RET]]
+double test_double(double expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x double> 
@llvm.[[ICF]].quad.read.across.x.v2f64(<2 x double> %[[#]])
+// CHECK: ret <2 x double> %[[RET]]
+double2 test_double2(double2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x double> 
@llvm.[[ICF]].quad.read.across.x.v3f64(<3 x double> %[[#]])
+// CHECK: ret <3 x double> %[[RET]]
+double3 test_double3(double3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x double> 
@llvm.[[ICF]].quad.read.across.x.v4f64(<4 x double> %[[#]])
+// CHECK: ret <4 x double> %[[RET]]
+double4 test_double4(double4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]half @llvm.[[ICF]].quad.read.across.x.f16(half %[[#]])
+// CHECK-NATIVE_HALF: ret half %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]float @llvm.[[ICF]].quad.read.across.x.f32(float %[[#]])
+// CHECK-NO_HALF: ret float %[[RET]]
+half test_half(half expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<2 x half> @llvm.[[ICF]].quad.read.across.x.v2f16(<2 x half> %[[#]])
+// CHECK-NATIVE_HALF: ret <2 x half> %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x 
float> @llvm.[[ICF]].quad.read.across.x.v2f32(<2 x float> %[[#]])
+// CHECK-NO_HALF: ret <2 x float> %[[RET]]
+half2 test_half2(half2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<3 x half> @llvm.[[ICF]].quad.read.across.x.v3f16(<3 x half> %[[#]])
+// CHECK-NATIVE_HALF: ret <3 x half> %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x 
float> @llvm.[[ICF]].quad.read.across.x.v3f32(<3 x float> %[[#]])
+// CHECK-NO_HALF: ret <3 x float> %[[RET]]
+half3 test_half3(half3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<4 x half> @llvm.[[ICF]].quad.read.across.x.v4f16(<4 x half> %[[#]])
+// CHECK-NATIVE_HALF: ret <4 x half> %[[RET]]
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x 
float> @llvm.[[ICF]].quad.read.across.x.v4f32(<4 x float> %[[#]])
+// CHECK-NO_HALF: ret <4 x float> %[[RET]]
+half4 test_half4(half4 expr) { return QuadReadAcrossX(expr); }
+
+#ifdef __HLSL_ENABLE_16_BIT
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 
@llvm.[[ICF]].quad.read.across.x.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: ret i16 %[[RET]]
+int16_t test_int16_t(int16_t expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> 
@llvm.[[ICF]].quad.read.across.x.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
+int16_t2 test_int16_t2(int16_t2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> 
@llvm.[[ICF]].quad.read.across.x.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
+int16_t3 test_int16_t3(int16_t3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> 
@llvm.[[ICF]].quad.read.across.x.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
+int16_t4 test_int16_t4(int16_t4 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 
@llvm.[[ICF]].quad.read.across.x.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: ret i16 %[[RET]]
+uint16_t test_uint16_t(uint16_t expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> 
@llvm.[[ICF]].quad.read.across.x.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
+uint16_t2 test_uint16_t2(uint16_t2 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> 
@llvm.[[ICF]].quad.read.across.x.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
+uint16_t3 test_uint16_t3(uint16_t3 expr) { return QuadReadAcrossX(expr); }
+
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> 
@llvm.[[ICF]].quad.read.across.x.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
+uint16_t4 test_uint16_t4(uint16_t4 expr) { return QuadReadAcrossX(expr); }
+#endif
diff --git a/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl 
b/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
index a9dcc162bbbb5..c3fb0628aca1f 100644
--- a/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -finclude-default-header -triple 
dxil-pc-shadermodel6.6-library %s -emit-llvm-only -disable-llvm-passes -verify
+// RUN: %clang_cc1 -finclude-default-header -triple 
dxil-pc-shadermodel6.6-library %s -verify
 
 int test_too_few_arg() {
   return __builtin_hlsl_quad_read_across_x();

>From 672e7ce40e2c8b1971048161643c1e7e132668fd Mon Sep 17 00:00:00 2001
From: Kai Huang <[email protected]>
Date: Sat, 11 Apr 2026 17:37:43 +0800
Subject: [PATCH 2/2] Use FileCheck -D options to specify the target and calling convention

---
 .../CodeGenHLSL/builtins/QuadReadAcrossX.hlsl | 90 +++++++++----------
 1 file changed, 44 insertions(+), 46 deletions(-)

diff --git a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl 
b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
index 2c9519bda777d..54dd82b9fd485 100644
--- a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
@@ -1,171 +1,169 @@
 // RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
 // RUN:   dxil-pc-shadermodel6.3-compute %s -fnative-half-type 
-fnative-int16-type \
 // RUN:   -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
-// RUN:   --check-prefixes=CHECK,CHECK-DXIL,CHECK-NATIVE_HALF
+// RUN:   --check-prefixes=CHECK,CHECK-NATIVE_HALF -DTARGET=dx -DCC=""
 // RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
 // RUN:   dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes \
-// RUN:   -o - | FileCheck %s --check-prefixes=CHECK,CHECK-DXIL,CHECK-NO_HALF
+// RUN:   -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO_HALF -DTARGET=dx 
-DCC=""
 
 // RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
 // RUN:   spirv-unknown-vulkan-compute %s -fnative-half-type 
-fnative-int16-type \
 // RUN:   -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
-// RUN:   --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NATIVE_HALF
+// RUN:   --check-prefixes=CHECK,CHECK-NATIVE_HALF -DTARGET=spv 
-DCC="spir_func "
 // RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
 // RUN:   spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
-// RUN:   -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV,CHECK-NO_HALF
+// RUN:   -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO_HALF 
-DTARGET=spv -DCC="spir_func " 
 
-// Capture the expected interchange format so not every check needs to be 
duplicated
-// CHECK-DXIL: %[[RET:.*]] = call [[CC:]]i32 
@llvm.[[ICF:dx]].quad.read.across.x.i32(i32 %[[#]])
-// CHECK-SPIRV: %[[RET:.*]] = call [[CC:spir_func ]]i32 
@llvm.[[ICF:spv]].quad.read.across.x.i32(i32 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i32 
@llvm.[[TARGET]].quad.read.across.x.i32(i32 %[[#]])
 // CHECK: ret i32 %[[RET]]
 int test_int(int expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> 
@llvm.[[ICF]].quad.read.across.x.v2i32(<2 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> 
@llvm.[[TARGET]].quad.read.across.x.v2i32(<2 x i32> %[[#]])
 // CHECK: ret <2 x i32> %[[RET]]
 int2 test_int2(int2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> 
@llvm.[[ICF]].quad.read.across.x.v3i32(<3 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> 
@llvm.[[TARGET]].quad.read.across.x.v3i32(<3 x i32> %[[#]])
 // CHECK: ret <3 x i32> %[[RET]]
 int3 test_int3(int3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> 
@llvm.[[ICF]].quad.read.across.x.v4i32(<4 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> 
@llvm.[[TARGET]].quad.read.across.x.v4i32(<4 x i32> %[[#]])
 // CHECK: ret <4 x i32> %[[RET]]
 int4 test_int4(int4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]i32 
@llvm.[[ICF]].quad.read.across.x.i32(i32 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i32 
@llvm.[[TARGET]].quad.read.across.x.i32(i32 %[[#]])
 // CHECK: ret i32 %[[RET]]
 uint test_uint(uint expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> 
@llvm.[[ICF]].quad.read.across.x.v2i32(<2 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i32> 
@llvm.[[TARGET]].quad.read.across.x.v2i32(<2 x i32> %[[#]])
 // CHECK: ret <2 x i32> %[[RET]]
 uint2 test_uint2(uint2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> 
@llvm.[[ICF]].quad.read.across.x.v3i32(<3 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i32> 
@llvm.[[TARGET]].quad.read.across.x.v3i32(<3 x i32> %[[#]])
 // CHECK: ret <3 x i32> %[[RET]]
 uint3 test_uint3(uint3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> 
@llvm.[[ICF]].quad.read.across.x.v4i32(<4 x i32> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i32> 
@llvm.[[TARGET]].quad.read.across.x.v4i32(<4 x i32> %[[#]])
 // CHECK: ret <4 x i32> %[[RET]]
 uint4 test_uint4(uint4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]i64 
@llvm.[[ICF]].quad.read.across.x.i64(i64 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i64 
@llvm.[[TARGET]].quad.read.across.x.i64(i64 %[[#]])
 // CHECK: ret i64 %[[RET]]
 int64_t test_int64_t(int64_t expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> 
@llvm.[[ICF]].quad.read.across.x.v2i64(<2 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> 
@llvm.[[TARGET]].quad.read.across.x.v2i64(<2 x i64> %[[#]])
 // CHECK: ret <2 x i64> %[[RET]]
 int64_t2 test_int64_t2(int64_t2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> 
@llvm.[[ICF]].quad.read.across.x.v3i64(<3 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> 
@llvm.[[TARGET]].quad.read.across.x.v3i64(<3 x i64> %[[#]])
 // CHECK: ret <3 x i64> %[[RET]]
 int64_t3 test_int64_t3(int64_t3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> 
@llvm.[[ICF]].quad.read.across.x.v4i64(<4 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> 
@llvm.[[TARGET]].quad.read.across.x.v4i64(<4 x i64> %[[#]])
 // CHECK: ret <4 x i64> %[[RET]]
 int64_t4 test_int64_t4(int64_t4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]i64 
@llvm.[[ICF]].quad.read.across.x.i64(i64 %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]i64 
@llvm.[[TARGET]].quad.read.across.x.i64(i64 %[[#]])
 // CHECK: ret i64 %[[RET]]
 uint64_t test_uint64_t(uint64_t expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> 
@llvm.[[ICF]].quad.read.across.x.v2i64(<2 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<2 x i64> 
@llvm.[[TARGET]].quad.read.across.x.v2i64(<2 x i64> %[[#]])
 // CHECK: ret <2 x i64> %[[RET]]
 uint64_t2 test_uint64_t2(uint64_t2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> 
@llvm.[[ICF]].quad.read.across.x.v3i64(<3 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<3 x i64> 
@llvm.[[TARGET]].quad.read.across.x.v3i64(<3 x i64> %[[#]])
 // CHECK: ret <3 x i64> %[[RET]]
 uint64_t3 test_uint64_t3(uint64_t3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> 
@llvm.[[ICF]].quad.read.across.x.v4i64(<4 x i64> %[[#]])
+// CHECK: %[[RET:.*]] = call [[CC]]<4 x i64> 
@llvm.[[TARGET]].quad.read.across.x.v4i64(<4 x i64> %[[#]])
 // CHECK: ret <4 x i64> %[[RET]]
 uint64_t4 test_uint64_t4(uint64_t4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float 
@llvm.[[ICF]].quad.read.across.x.f32(float %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]float 
@llvm.[[TARGET]].quad.read.across.x.f32(float %[[#]])
 // CHECK: ret float %[[RET]]
 float test_float(float expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> 
@llvm.[[ICF]].quad.read.across.x.v2f32(<2 x float> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x float> 
@llvm.[[TARGET]].quad.read.across.x.v2f32(<2 x float> %[[#]])
 // CHECK: ret <2 x float> %[[RET]]
 float2 test_float2(float2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> 
@llvm.[[ICF]].quad.read.across.x.v3f32(<3 x float> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x float> 
@llvm.[[TARGET]].quad.read.across.x.v3f32(<3 x float> %[[#]])
 // CHECK: ret <3 x float> %[[RET]]
 float3 test_float3(float3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> 
@llvm.[[ICF]].quad.read.across.x.v4f32(<4 x float> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x float> 
@llvm.[[TARGET]].quad.read.across.x.v4f32(<4 x float> %[[#]])
 // CHECK: ret <4 x float> %[[RET]]
 float4 test_float4(float4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]double 
@llvm.[[ICF]].quad.read.across.x.f64(double %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]double 
@llvm.[[TARGET]].quad.read.across.x.f64(double %[[#]])
 // CHECK: ret double %[[RET]]
 double test_double(double expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x double> 
@llvm.[[ICF]].quad.read.across.x.v2f64(<2 x double> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x double> 
@llvm.[[TARGET]].quad.read.across.x.v2f64(<2 x double> %[[#]])
 // CHECK: ret <2 x double> %[[RET]]
 double2 test_double2(double2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x double> 
@llvm.[[ICF]].quad.read.across.x.v3f64(<3 x double> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x double> 
@llvm.[[TARGET]].quad.read.across.x.v3f64(<3 x double> %[[#]])
 // CHECK: ret <3 x double> %[[RET]]
 double3 test_double3(double3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x double> 
@llvm.[[ICF]].quad.read.across.x.v4f64(<4 x double> %[[#]])
+// CHECK: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x double> 
@llvm.[[TARGET]].quad.read.across.x.v4f64(<4 x double> %[[#]])
 // CHECK: ret <4 x double> %[[RET]]
 double4 test_double4(double4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]half @llvm.[[ICF]].quad.read.across.x.f16(half %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]half @llvm.[[TARGET]].quad.read.across.x.f16(half %[[#]])
 // CHECK-NATIVE_HALF: ret half %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]float @llvm.[[ICF]].quad.read.across.x.f32(float %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]float @llvm.[[TARGET]].quad.read.across.x.f32(float %[[#]])
 // CHECK-NO_HALF: ret float %[[RET]]
 half test_half(half expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<2 x half> @llvm.[[ICF]].quad.read.across.x.v2f16(<2 x half> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<2 x half> @llvm.[[TARGET]].quad.read.across.x.v2f16(<2 x half> %[[#]])
 // CHECK-NATIVE_HALF: ret <2 x half> %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x 
float> @llvm.[[ICF]].quad.read.across.x.v2f32(<2 x float> %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<2 x 
float> @llvm.[[TARGET]].quad.read.across.x.v2f32(<2 x float> %[[#]])
 // CHECK-NO_HALF: ret <2 x float> %[[RET]]
 half2 test_half2(half2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<3 x half> @llvm.[[ICF]].quad.read.across.x.v3f16(<3 x half> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<3 x half> @llvm.[[TARGET]].quad.read.across.x.v3f16(<3 x half> %[[#]])
 // CHECK-NATIVE_HALF: ret <3 x half> %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x 
float> @llvm.[[ICF]].quad.read.across.x.v3f32(<3 x float> %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<3 x 
float> @llvm.[[TARGET]].quad.read.across.x.v3f32(<3 x float> %[[#]])
 // CHECK-NO_HALF: ret <3 x float> %[[RET]]
 half3 test_half3(half3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<4 x half> @llvm.[[ICF]].quad.read.across.x.v4f16(<4 x half> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn 
[[CC]]<4 x half> @llvm.[[TARGET]].quad.read.across.x.v4f16(<4 x half> %[[#]])
 // CHECK-NATIVE_HALF: ret <4 x half> %[[RET]]
-// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x 
float> @llvm.[[ICF]].quad.read.across.x.v4f32(<4 x float> %[[#]])
+// CHECK-NO_HALF: %[[RET:.*]] = call reassoc nnan ninf nsz arcp afn [[CC]]<4 x 
float> @llvm.[[TARGET]].quad.read.across.x.v4f32(<4 x float> %[[#]])
 // CHECK-NO_HALF: ret <4 x float> %[[RET]]
 half4 test_half4(half4 expr) { return QuadReadAcrossX(expr); }
 
 #ifdef __HLSL_ENABLE_16_BIT
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 
@llvm.[[ICF]].quad.read.across.x.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 
@llvm.[[TARGET]].quad.read.across.x.i16(i16 %[[#]])
 // CHECK-NATIVE_HALF: ret i16 %[[RET]]
 int16_t test_int16_t(int16_t expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> 
@llvm.[[ICF]].quad.read.across.x.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> 
@llvm.[[TARGET]].quad.read.across.x.v2i16(<2 x i16> %[[#]])
 // CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
 int16_t2 test_int16_t2(int16_t2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> 
@llvm.[[ICF]].quad.read.across.x.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> 
@llvm.[[TARGET]].quad.read.across.x.v3i16(<3 x i16> %[[#]])
 // CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
 int16_t3 test_int16_t3(int16_t3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> 
@llvm.[[ICF]].quad.read.across.x.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> 
@llvm.[[TARGET]].quad.read.across.x.v4i16(<4 x i16> %[[#]])
 // CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
 int16_t4 test_int16_t4(int16_t4 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 
@llvm.[[ICF]].quad.read.across.x.i16(i16 %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]i16 
@llvm.[[TARGET]].quad.read.across.x.i16(i16 %[[#]])
 // CHECK-NATIVE_HALF: ret i16 %[[RET]]
 uint16_t test_uint16_t(uint16_t expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> 
@llvm.[[ICF]].quad.read.across.x.v2i16(<2 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<2 x i16> 
@llvm.[[TARGET]].quad.read.across.x.v2i16(<2 x i16> %[[#]])
 // CHECK-NATIVE_HALF: ret <2 x i16> %[[RET]]
 uint16_t2 test_uint16_t2(uint16_t2 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> 
@llvm.[[ICF]].quad.read.across.x.v3i16(<3 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<3 x i16> 
@llvm.[[TARGET]].quad.read.across.x.v3i16(<3 x i16> %[[#]])
 // CHECK-NATIVE_HALF: ret <3 x i16> %[[RET]]
 uint16_t3 test_uint16_t3(uint16_t3 expr) { return QuadReadAcrossX(expr); }
 
-// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> 
@llvm.[[ICF]].quad.read.across.x.v4i16(<4 x i16> %[[#]])
+// CHECK-NATIVE_HALF: %[[RET:.*]] = call [[CC]]<4 x i16> 
@llvm.[[TARGET]].quad.read.across.x.v4i16(<4 x i16> %[[#]])
 // CHECK-NATIVE_HALF: ret <4 x i16> %[[RET]]
 uint16_t4 test_uint16_t4(uint16_t4 expr) { return QuadReadAcrossX(expr); }
 #endif

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to