https://github.com/tru updated https://github.com/llvm/llvm-project/pull/109915
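
For reference, a minimal reproducer for the assertion fixed here, condensed from the new tests added by the patch (the llc invocation below is only a plausible way to exercise it, not a RUN line taken from the test file):

  ; llc -mtriple=loongarch64 reduced.ll
  define void @store_release_ptr(ptr %ptr, ptr %v) {
    store atomic ptr %v, ptr %ptr release, align 8
    ret void
  }

Before this change, shouldInsertFencesForAtomic() called getIntegerBitWidth() on the stored value's type for every StoreInst, which asserts when that type is 'ptr'; the patch only queries the bit width after checking Ty->isIntegerTy().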
>From b3734d9f93c1f8d908836a966f77c6792242df99 Mon Sep 17 00:00:00 2001
From: Weining Lu <luwein...@loongson.cn>
Date: Mon, 19 Aug 2024 16:51:21 +0800
Subject: [PATCH] [LoongArch] Fix the assertion for atomic store with 'ptr' type

(cherry picked from commit 63267ca9016aa334b329aa408716456b4e3799c8)
---
 .../LoongArch/LoongArchISelLowering.cpp       |   5 +-
 .../ir-instruction/load-store-atomic.ll       | 119 ++++++++++++++++++
 2 files changed, 122 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 93edafaff553ba..082b42398c6a71 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -5601,8 +5601,9 @@ bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
 
   // On LA64, atomic store operations with IntegerBitWidth of 32 and 64 do not
   // require fences beacuse we can use amswap_db.[w/d].
-  if (isa<StoreInst>(I)) {
-    unsigned Size = I->getOperand(0)->getType()->getIntegerBitWidth();
+  Type *Ty = I->getOperand(0)->getType();
+  if (isa<StoreInst>(I) && Ty->isIntegerTy()) {
+    unsigned Size = Ty->getIntegerBitWidth();
     return (Size == 8 || Size == 16);
   }
 
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
index c51fded410e83b..1af2b38d799436 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -72,6 +72,22 @@ define i64 @load_acquire_i64(ptr %ptr) {
   ret i64 %val
 }
 
+define ptr @load_acquire_ptr(ptr %ptr) {
+; LA32-LABEL: load_acquire_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    dbar 20
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_acquire_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    dbar 20
+; LA64-NEXT:    ret
+  %val = load atomic ptr, ptr %ptr acquire, align 8
+  ret ptr %val
+}
+
 define i8 @load_unordered_i8(ptr %ptr) {
 ; LA32-LABEL: load_unordered_i8:
 ; LA32:       # %bb.0:
@@ -135,6 +151,20 @@ define i64 @load_unordered_i64(ptr %ptr) {
   ret i64 %val
 }
 
+define ptr @load_unordered_ptr(ptr %ptr) {
+; LA32-LABEL: load_unordered_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_unordered_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic ptr, ptr %ptr unordered, align 8
+  ret ptr %val
+}
+
 define i8 @load_monotonic_i8(ptr %ptr) {
 ; LA32-LABEL: load_monotonic_i8:
 ; LA32:       # %bb.0:
@@ -198,6 +228,20 @@ define i64 @load_monotonic_i64(ptr %ptr) {
   ret i64 %val
 }
 
+define ptr @load_monotonic_ptr(ptr %ptr) {
+; LA32-LABEL: load_monotonic_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_monotonic_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    ret
+  %val = load atomic ptr, ptr %ptr monotonic, align 8
+  ret ptr %val
+}
+
 define i8 @load_seq_cst_i8(ptr %ptr) {
 ; LA32-LABEL: load_seq_cst_i8:
 ; LA32:       # %bb.0:
@@ -268,6 +312,22 @@ define i64 @load_seq_cst_i64(ptr %ptr) {
   ret i64 %val
 }
 
+define ptr @load_seq_cst_ptr(ptr %ptr) {
+; LA32-LABEL: load_seq_cst_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    dbar 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: load_seq_cst_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    dbar 16
+; LA64-NEXT:    ret
+  %val = load atomic ptr, ptr %ptr seq_cst, align 8
+  ret ptr %val
+}
+
 define void @store_release_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_release_i8:
 ; LA32:       # %bb.0:
@@ -336,6 +396,21 @@ define void @store_release_i64(ptr %ptr, i64 %v) {
   ret void
 }
 
+define void @store_release_ptr(ptr %ptr, ptr %v) {
+; LA32-LABEL: store_release_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 18
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: store_release_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.d $zero, $a1, $a0
+; LA64-NEXT:    ret
+  store atomic ptr %v, ptr %ptr release, align 8
+  ret void
+}
+
 define void @store_unordered_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_unordered_i8:
 ; LA32:       # %bb.0:
@@ -399,6 +474,20 @@ define void @store_unordered_i64(ptr %ptr, i64 %v) {
   ret void
 }
 
+define void @store_unordered_ptr(ptr %ptr, ptr %v) {
+; LA32-LABEL: store_unordered_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: store_unordered_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    st.d $a1, $a0, 0
+; LA64-NEXT:    ret
+  store atomic ptr %v, ptr %ptr unordered, align 8
+  ret void
+}
+
 define void @store_monotonic_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_monotonic_i8:
 ; LA32:       # %bb.0:
@@ -462,6 +551,20 @@ define void @store_monotonic_i64(ptr %ptr, i64 %v) {
   ret void
 }
 
+define void @store_monotonic_ptr(ptr %ptr, ptr %v) {
+; LA32-LABEL: store_monotonic_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: store_monotonic_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    st.d $a1, $a0, 0
+; LA64-NEXT:    ret
+  store atomic ptr %v, ptr %ptr monotonic, align 8
+  ret void
+}
+
 define void @store_seq_cst_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_seq_cst_i8:
 ; LA32:       # %bb.0:
@@ -534,3 +637,19 @@ define void @store_seq_cst_i64(ptr %ptr, i64 %v) {
   store atomic i64 %v, ptr %ptr seq_cst, align 8
   ret void
 }
+
+define void @store_seq_cst_ptr(ptr %ptr, ptr %v) {
+; LA32-LABEL: store_seq_cst_ptr:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 16
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    dbar 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: store_seq_cst_ptr:
+; LA64:       # %bb.0:
+; LA64-NEXT:    amswap_db.d $zero, $a1, $a0
+; LA64-NEXT:    ret
+  store atomic ptr %v, ptr %ptr seq_cst, align 8
+  ret void
+}

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits