lkail updated this revision to Diff 418537.
lkail added a comment.

Thanks @efriedma for pointing this out. Updated the guard code. Also adjusted the
backend to reflect the ABI change.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D122377/new/

https://reviews.llvm.org/D122377

Files:
  clang/lib/Basic/Targets/PPC.cpp
  clang/lib/Basic/Targets/PPC.h
  clang/test/CodeGen/PowerPC/atomic-alignment.c
  clang/test/CodeGen/PowerPC/quadword-atomics.c
  clang/test/Sema/atomic-ops.c
  llvm/lib/Target/PowerPC/PPCISelLowering.cpp
  llvm/lib/Target/PowerPC/PPCISelLowering.h
  llvm/test/CodeGen/PowerPC/atomics-i128.ll

Index: llvm/test/CodeGen/PowerPC/atomics-i128.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/atomics-i128.ll
+++ llvm/test/CodeGen/PowerPC/atomics-i128.ll
@@ -5,6 +5,15 @@
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-unknown -mcpu=pwr7 \
 ; RUN:   -ppc-asm-full-reg-names -ppc-quadword-atomics \
 ; RUN:   -ppc-track-subreg-liveness < %s | FileCheck --check-prefix=PWR7 %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 \
+; RUN:   -ppc-asm-full-reg-names -ppc-track-subreg-liveness < %s | FileCheck \
+; RUN:   --check-prefix=LE-PWR8 %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-freebsd -mcpu=pwr8 \
+; RUN:   -ppc-asm-full-reg-names -ppc-track-subreg-liveness < %s | FileCheck \
+; RUN:   --check-prefix=LE-PWR8 %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-aix -mcpu=pwr8 \
+; RUN:   -ppc-asm-full-reg-names -ppc-track-subreg-liveness < %s | FileCheck \
+; RUN:   --check-prefix=AIX64-PWR8 %s
 
 
 define i128 @swap(i128* %a, i128 %x) {
@@ -39,6 +48,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: swap:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB0_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    mr r9, r4
+; LE-PWR8-NEXT:    mr r8, r5
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB0_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: swap:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_lock_test_and_set_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw xchg i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -76,6 +115,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: add:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB1_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    addc r9, r4, r7
+; LE-PWR8-NEXT:    adde r8, r5, r6
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB1_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: add:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_fetch_and_add_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw add i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -113,6 +182,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: sub:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB2_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    subc r9, r7, r4
+; LE-PWR8-NEXT:    subfe r8, r5, r6
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB2_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: sub:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_fetch_and_sub_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw sub i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -150,6 +249,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: and:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB3_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    and r9, r4, r7
+; LE-PWR8-NEXT:    and r8, r5, r6
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB3_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: and:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_fetch_and_and_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw and i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -187,6 +316,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: or:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB4_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    or r9, r4, r7
+; LE-PWR8-NEXT:    or r8, r5, r6
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB4_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: or:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_fetch_and_or_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw or i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -224,6 +383,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: xor:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB5_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    xor r9, r4, r7
+; LE-PWR8-NEXT:    xor r8, r5, r6
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB5_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: xor:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_fetch_and_xor_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw xor i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -261,6 +450,36 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: nand:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB6_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r6, 0, r3
+; LE-PWR8-NEXT:    nand r9, r4, r7
+; LE-PWR8-NEXT:    nand r8, r5, r6
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB6_1
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r7
+; LE-PWR8-NEXT:    mr r4, r6
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: nand:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_fetch_and_nand_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = atomicrmw nand i128* %a, i128 %x seq_cst, align 16
   ret i128 %0
@@ -306,6 +525,43 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: cas_weak_acquire_acquire:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:  .LBB7_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r8, 0, r3
+; LE-PWR8-NEXT:    xor r11, r9, r4
+; LE-PWR8-NEXT:    xor r10, r8, r5
+; LE-PWR8-NEXT:    or. r11, r11, r10
+; LE-PWR8-NEXT:    bne cr0, .LBB7_3
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    mr r11, r6
+; LE-PWR8-NEXT:    mr r10, r7
+; LE-PWR8-NEXT:    stqcx. r10, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB7_1
+; LE-PWR8-NEXT:    b .LBB7_4
+; LE-PWR8-NEXT:  .LBB7_3: # %entry
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:  .LBB7_4: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r9
+; LE-PWR8-NEXT:    mr r4, r8
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: cas_weak_acquire_acquire:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    bl .__sync_val_compare_and_swap_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = cmpxchg weak i128* %a, i128 %cmp, i128 %new acquire acquire
   %1 = extractvalue { i128, i1 } %0, 0
@@ -351,6 +607,43 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: cas_weak_release_monotonic:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:  .LBB8_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r8, 0, r3
+; LE-PWR8-NEXT:    xor r11, r9, r4
+; LE-PWR8-NEXT:    xor r10, r8, r5
+; LE-PWR8-NEXT:    or. r11, r11, r10
+; LE-PWR8-NEXT:    bne cr0, .LBB8_3
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    mr r11, r6
+; LE-PWR8-NEXT:    mr r10, r7
+; LE-PWR8-NEXT:    stqcx. r10, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB8_1
+; LE-PWR8-NEXT:    b .LBB8_4
+; LE-PWR8-NEXT:  .LBB8_3: # %entry
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:  .LBB8_4: # %entry
+; LE-PWR8-NEXT:    mr r3, r9
+; LE-PWR8-NEXT:    mr r4, r8
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: cas_weak_release_monotonic:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    bl .__sync_val_compare_and_swap_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = cmpxchg weak i128* %a, i128 %cmp, i128 %new release monotonic
   %1 = extractvalue { i128, i1 } %0, 0
@@ -398,6 +691,45 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: cas_sc_sc:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    sync
+; LE-PWR8-NEXT:  .LBB9_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r8, 0, r3
+; LE-PWR8-NEXT:    xor r11, r9, r4
+; LE-PWR8-NEXT:    xor r10, r8, r5
+; LE-PWR8-NEXT:    or. r11, r11, r10
+; LE-PWR8-NEXT:    bne cr0, .LBB9_3
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    mr r11, r6
+; LE-PWR8-NEXT:    mr r10, r7
+; LE-PWR8-NEXT:    stqcx. r10, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB9_1
+; LE-PWR8-NEXT:    b .LBB9_4
+; LE-PWR8-NEXT:  .LBB9_3: # %entry
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:  .LBB9_4: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r9
+; LE-PWR8-NEXT:    mr r4, r8
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: cas_sc_sc:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    sync
+; AIX64-PWR8-NEXT:    bl .__sync_val_compare_and_swap_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = cmpxchg i128* %a, i128 %cmp, i128 %new seq_cst seq_cst
   %1 = extractvalue { i128, i1 } %0, 0
@@ -445,6 +777,45 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: cas_acqrel_acquire:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:  .LBB10_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r8, 0, r3
+; LE-PWR8-NEXT:    xor r11, r9, r4
+; LE-PWR8-NEXT:    xor r10, r8, r5
+; LE-PWR8-NEXT:    or. r11, r11, r10
+; LE-PWR8-NEXT:    bne cr0, .LBB10_3
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    mr r11, r6
+; LE-PWR8-NEXT:    mr r10, r7
+; LE-PWR8-NEXT:    stqcx. r10, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB10_1
+; LE-PWR8-NEXT:    b .LBB10_4
+; LE-PWR8-NEXT:  .LBB10_3: # %entry
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:  .LBB10_4: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    mr r3, r9
+; LE-PWR8-NEXT:    mr r4, r8
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: cas_acqrel_acquire:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -112(r1)
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    bl .__sync_val_compare_and_swap_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    addi r1, r1, 112
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = cmpxchg i128* %a, i128 %cmp, i128 %new acq_rel acquire
   %1 = extractvalue { i128, i1 } %0, 0
@@ -508,6 +879,59 @@
 ; PWR7-NEXT:    ld r0, 16(r1)
 ; PWR7-NEXT:    mtlr r0
 ; PWR7-NEXT:    blr
+;
+; LE-PWR8-LABEL: cas_acqrel_acquire_check_succ:
+; LE-PWR8:       # %bb.0: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:  .LBB11_1: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    lqarx r8, 0, r3
+; LE-PWR8-NEXT:    xor r11, r9, r4
+; LE-PWR8-NEXT:    xor r10, r8, r5
+; LE-PWR8-NEXT:    or. r11, r11, r10
+; LE-PWR8-NEXT:    bne cr0, .LBB11_3
+; LE-PWR8-NEXT:  # %bb.2: # %entry
+; LE-PWR8-NEXT:    #
+; LE-PWR8-NEXT:    mr r11, r6
+; LE-PWR8-NEXT:    mr r10, r7
+; LE-PWR8-NEXT:    stqcx. r10, 0, r3
+; LE-PWR8-NEXT:    bne cr0, .LBB11_1
+; LE-PWR8-NEXT:    b .LBB11_4
+; LE-PWR8-NEXT:  .LBB11_3: # %entry
+; LE-PWR8-NEXT:    stqcx. r8, 0, r3
+; LE-PWR8-NEXT:  .LBB11_4: # %entry
+; LE-PWR8-NEXT:    lwsync
+; LE-PWR8-NEXT:    xor r3, r5, r8
+; LE-PWR8-NEXT:    xor r4, r4, r9
+; LE-PWR8-NEXT:    or r3, r4, r3
+; LE-PWR8-NEXT:    cntlzd r3, r3
+; LE-PWR8-NEXT:    rldicl r3, r3, 58, 63
+; LE-PWR8-NEXT:    blr
+;
+; AIX64-PWR8-LABEL: cas_acqrel_acquire_check_succ:
+; AIX64-PWR8:       # %bb.0: # %entry
+; AIX64-PWR8-NEXT:    mflr r0
+; AIX64-PWR8-NEXT:    std r0, 16(r1)
+; AIX64-PWR8-NEXT:    stdu r1, -128(r1)
+; AIX64-PWR8-NEXT:    std r30, 112(r1) # 8-byte Folded Spill
+; AIX64-PWR8-NEXT:    std r31, 120(r1) # 8-byte Folded Spill
+; AIX64-PWR8-NEXT:    mr r31, r5
+; AIX64-PWR8-NEXT:    mr r30, r4
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    bl .__sync_val_compare_and_swap_16[PR]
+; AIX64-PWR8-NEXT:    nop
+; AIX64-PWR8-NEXT:    xor r3, r3, r30
+; AIX64-PWR8-NEXT:    xor r4, r4, r31
+; AIX64-PWR8-NEXT:    lwsync
+; AIX64-PWR8-NEXT:    or r3, r4, r3
+; AIX64-PWR8-NEXT:    ld r31, 120(r1) # 8-byte Folded Reload
+; AIX64-PWR8-NEXT:    ld r30, 112(r1) # 8-byte Folded Reload
+; AIX64-PWR8-NEXT:    cntlzd r3, r3
+; AIX64-PWR8-NEXT:    rldicl r3, r3, 58, 63
+; AIX64-PWR8-NEXT:    addi r1, r1, 128
+; AIX64-PWR8-NEXT:    ld r0, 16(r1)
+; AIX64-PWR8-NEXT:    mtlr r0
+; AIX64-PWR8-NEXT:    blr
 entry:
   %0 = cmpxchg i128* %a, i128 %cmp, i128 %new acq_rel acquire
   %1 = extractvalue { i128, i1 } %0, 1
Index: llvm/lib/Target/PowerPC/PPCISelLowering.h
===================================================================
--- llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -910,6 +910,8 @@
     Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                    AtomicOrdering Ord) const override;
 
+    bool shouldInlineQuadwordAtomics() const;
+
     TargetLowering::AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
 
Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1321,7 +1321,7 @@
     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
   }
 
-  if (EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics()) {
+  if (shouldInlineQuadwordAtomics()) {
     setMaxAtomicSizeInBitsSupported(128);
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
@@ -18038,10 +18038,16 @@
   }
 }
 
+bool PPCTargetLowering::shouldInlineQuadwordAtomics() const {
+  return (EnableQuadwordAtomics ||
+          Subtarget.getTargetTriple().isOSBinFormatELF()) &&
+         Subtarget.hasQuadwordAtomics();
+}
+
 TargetLowering::AtomicExpansionKind
 PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
-  if (EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() && Size == 128)
+  if (shouldInlineQuadwordAtomics() && Size == 128)
     return AtomicExpansionKind::MaskedIntrinsic;
   return TargetLowering::shouldExpandAtomicRMWInIR(AI);
 }
@@ -18049,7 +18055,7 @@
 TargetLowering::AtomicExpansionKind
 PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
   unsigned Size = AI->getNewValOperand()->getType()->getPrimitiveSizeInBits();
-  if (EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() && Size == 128)
+  if (shouldInlineQuadwordAtomics() && Size == 128)
     return AtomicExpansionKind::MaskedIntrinsic;
   return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
 }
@@ -18079,8 +18085,7 @@
 Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
-  assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
-         "Only support quadword now");
+  assert(shouldInlineQuadwordAtomics() && "Only support quadword now");
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = Incr->getType();
   assert(ValTy->getPrimitiveSizeInBits() == 128);
@@ -18104,8 +18109,7 @@
 Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
-  assert(EnableQuadwordAtomics && Subtarget.hasQuadwordAtomics() &&
-         "Only support quadword now");
+  assert(shouldInlineQuadwordAtomics() && "Only support quadword now");
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = CmpVal->getType();
   assert(ValTy->getPrimitiveSizeInBits() == 128);
Index: clang/test/Sema/atomic-ops.c
===================================================================
--- clang/test/Sema/atomic-ops.c
+++ clang/test/Sema/atomic-ops.c
@@ -9,7 +9,7 @@
 // RUN:   -target-cpu pwr7
 // RUN: %clang_cc1 %s -verify -fgnuc-version=4.2.1 -ffreestanding \
 // RUN:   -fsyntax-only -triple=powerpc64le-linux-gnu -std=c11 \
-// RUN:   -target-cpu pwr8
+// RUN:   -target-cpu pwr8 -DPPC64_PWR8
 
 // Basic parsing/Sema tests for __c11_atomic_*
 
@@ -47,7 +47,11 @@
 _Static_assert(__c11_atomic_is_lock_free(3), ""); // expected-error {{not an integral constant expression}}
 _Static_assert(__c11_atomic_is_lock_free(4), "");
 _Static_assert(__c11_atomic_is_lock_free(8), "");
+#ifndef PPC64_PWR8
 _Static_assert(__c11_atomic_is_lock_free(16), ""); // expected-error {{not an integral constant expression}}
+#else
+_Static_assert(__c11_atomic_is_lock_free(16), ""); // expected-no-error
+#endif
 _Static_assert(__c11_atomic_is_lock_free(17), ""); // expected-error {{not an integral constant expression}}
 
 _Static_assert(__atomic_is_lock_free(1, 0), "");
@@ -55,15 +59,23 @@
 _Static_assert(__atomic_is_lock_free(3, 0), ""); // expected-error {{not an integral constant expression}}
 _Static_assert(__atomic_is_lock_free(4, 0), "");
 _Static_assert(__atomic_is_lock_free(8, 0), "");
+#ifndef PPC64_PWR8
 _Static_assert(__atomic_is_lock_free(16, 0), ""); // expected-error {{not an integral constant expression}}
+#else
+_Static_assert(__atomic_is_lock_free(16, 0), ""); // expected-no-error
+#endif
 _Static_assert(__atomic_is_lock_free(17, 0), ""); // expected-error {{not an integral constant expression}}
 
 _Static_assert(atomic_is_lock_free((atomic_char*)0), "");
 _Static_assert(atomic_is_lock_free((atomic_short*)0), "");
 _Static_assert(atomic_is_lock_free((atomic_int*)0), "");
 _Static_assert(atomic_is_lock_free((atomic_long*)0), "");
+#ifndef PPC64_PWR8
 // noi128-error@+1 {{__int128 is not supported on this target}}
 _Static_assert(atomic_is_lock_free((_Atomic(__int128)*)0), ""); // expected-error {{not an integral constant expression}}
+#else
+_Static_assert(atomic_is_lock_free((_Atomic(__int128)*)0), ""); // expected-no-error
+#endif
 _Static_assert(atomic_is_lock_free(0 + (atomic_char*)0), "");
 
 char i8;
@@ -88,7 +100,11 @@
 _Static_assert(!__atomic_always_lock_free(3, 0), "");
 _Static_assert(__atomic_always_lock_free(4, 0), "");
 _Static_assert(__atomic_always_lock_free(8, 0), "");
+#ifndef PPC64_PWR8
 _Static_assert(!__atomic_always_lock_free(16, 0), "");
+#else
+_Static_assert(__atomic_always_lock_free(16, 0), "");
+#endif
 _Static_assert(!__atomic_always_lock_free(17, 0), "");
 
 _Static_assert(__atomic_always_lock_free(1, incomplete), "");
Index: clang/test/CodeGen/PowerPC/quadword-atomics.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/PowerPC/quadword-atomics.c
@@ -0,0 +1,90 @@
+// RUN: %clang_cc1 -verify -Wno-atomic-alignment -triple powerpc64le-linux-gnu \
+// RUN:   -target-cpu pwr8 -emit-llvm -o - %s | FileCheck %s --check-prefix=PPC64-PWR8
+// RUN: %clang_cc1 -verify -Wno-atomic-alignment -triple powerpc64le-linux-gnu \
+// RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix=PPC64
+
+typedef struct {
+  char x[16];
+} Q;
+
+typedef _Atomic(Q) AtomicQ;
+
+typedef __int128_t int128_t;
+
+// PPC64-PWR8-LABEL: @test_load(
+// PPC64-PWR8:    [[TMP3:%.*]] = load atomic i128, i128* [[TMP1:%.*]] acquire, align 16
+//
+// PPC64-LABEL: @test_load(
+// PPC64:    call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP3:%.*]], i8* noundef [[TMP4:%.*]], i32 noundef signext 2)
+//
+Q test_load(AtomicQ *ptr) {
+  // expected-no-diagnostics
+  return __c11_atomic_load(ptr, __ATOMIC_ACQUIRE);
+}
+
+// PPC64-PWR8-LABEL: @test_store(
+// PPC64-PWR8:    store atomic i128 [[TMP6:%.*]], i128* [[TMP4:%.*]] release, align 16
+//
+// PPC64-LABEL: @test_store(
+// PPC64:    call void @__atomic_store(i64 noundef 16, i8* noundef [[TMP6:%.*]], i8* noundef [[TMP7:%.*]], i32 noundef signext 3)
+//
+void test_store(Q val, AtomicQ *ptr) {
+  // expected-no-diagnostics
+  __c11_atomic_store(ptr, val, __ATOMIC_RELEASE);
+}
+
+// PPC64-PWR8-LABEL: @test_add(
+// PPC64-PWR8:    [[TMP3:%.*]] = atomicrmw add i128* [[TMP0:%.*]], i128 [[TMP2:%.*]] monotonic, align 16
+//
+// PPC64-LABEL: @test_add(
+// PPC64:    [[CALL:%.*]] = call i128 @__atomic_fetch_add_16(i8* noundef [[TMP2:%.*]], i128 noundef [[TMP3:%.*]], i32 noundef signext 0)
+//
+void test_add(int128_t *ptr, int128_t x) {
+  // expected-no-diagnostics
+  __c11_atomic_fetch_add((_Atomic(int128_t) *)ptr, x, __ATOMIC_RELAXED);
+}
+
+// PPC64-PWR8-LABEL: @test_xchg(
+// PPC64-PWR8:    [[TMP8:%.*]] = atomicrmw xchg i128* [[TMP4:%.*]], i128 [[TMP7:%.*]] seq_cst, align 16
+//
+// PPC64-LABEL: @test_xchg(
+// PPC64:    call void @__atomic_exchange(i64 noundef 16, i8* noundef [[TMP7:%.*]], i8* noundef [[TMP8:%.*]], i8* noundef [[TMP9:%.*]], i32 noundef signext 5)
+//
+Q test_xchg(AtomicQ *ptr, Q new) {
+  // expected-no-diagnostics
+  return __c11_atomic_exchange(ptr, new, __ATOMIC_SEQ_CST);
+}
+
+// PPC64-PWR8-LABEL: @test_cmpxchg(
+// PPC64-PWR8:    [[TMP10:%.*]] = cmpxchg i128* [[TMP5:%.*]], i128 [[TMP8:%.*]], i128 [[TMP9:%.*]] seq_cst monotonic, align 16
+//
+// PPC64-LABEL: @test_cmpxchg(
+// PPC64:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP8:%.*]], i8* noundef [[TMP9:%.*]], i8* noundef [[TMP10:%.*]], i32 noundef signext 5, i32 noundef signext 0)
+//
+int test_cmpxchg(AtomicQ *ptr, Q *cmp, Q new) {
+  // expected-no-diagnostics
+  return __c11_atomic_compare_exchange_strong(ptr, cmp, new, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
+}
+
+// PPC64-PWR8-LABEL: @test_cmpxchg_weak(
+// PPC64-PWR8:    [[TMP10:%.*]] = cmpxchg weak i128* [[TMP5:%.*]], i128 [[TMP8:%.*]], i128 [[TMP9:%.*]] seq_cst monotonic, align 16
+//
+// PPC64-LABEL: @test_cmpxchg_weak(
+// PPC64:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP8:%.*]], i8* noundef [[TMP9:%.*]], i8* noundef [[TMP10:%.*]], i32 noundef signext 5, i32 noundef signext 0)
+//
+int test_cmpxchg_weak(AtomicQ *ptr, Q *cmp, Q new) {
+  // expected-no-diagnostics
+  return __c11_atomic_compare_exchange_weak(ptr, cmp, new, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
+}
+
+// PPC64-PWR8-LABEL: @is_lock_free(
+// PPC64-PWR8:    ret i32 1
+//
+// PPC64-LABEL: @is_lock_free(
+// PPC64:    [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, i8* noundef null)
+//
+int is_lock_free() {
+  AtomicQ q;
+ // expected-no-diagnostics
+  return __c11_atomic_is_lock_free(sizeof(q));
+}
Index: clang/test/CodeGen/PowerPC/atomic-alignment.c
===================================================================
--- clang/test/CodeGen/PowerPC/atomic-alignment.c
+++ clang/test/CodeGen/PowerPC/atomic-alignment.c
@@ -2,8 +2,13 @@
 // RUN:   FileCheck %s --check-prefixes=PPC,PPC32
 // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux -emit-llvm -o - %s | \
 // RUN:   FileCheck %s --check-prefixes=PPC,PPC64
+// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux -emit-llvm -o - %s \
+// RUN:   -target-cpu pwr8 | FileCheck %s --check-prefixes=PPC,PPC64
 // RUN: %clang_cc1 -verify -triple powerpc64-unknown-aix -emit-llvm -o - %s | \
-// RUN:   FileCheck %s --check-prefixes=PPC,PPC64
+// RUN:   FileCheck %s --check-prefixes=PPC,AIX64
+// RUN: %clang_cc1 -verify -triple powerpc64-unknown-aix -emit-llvm -o - %s \
+// RUN:   -target-cpu pwr8 | FileCheck %s --check-prefixes=PPC,AIX64
+
 
 // PPC: @c = global i8 0, align 1{{$}}
 _Atomic(char) c; // expected-no-diagnostics
@@ -16,6 +21,7 @@
 
 // PPC32: @l = global i32 0, align 4{{$}}
 // PPC64: @l = global i64 0, align 8{{$}}
+// AIX64: @l = global i64 0, align 8{{$}}
 _Atomic(long) l; // expected-no-diagnostics
 
 // PPC: @ll = global i64 0, align 8{{$}}
@@ -27,11 +33,14 @@
 
 // PPC32: @o = global %struct.O zeroinitializer, align 1{{$}}
 // PPC64: @o = global %struct.O zeroinitializer, align 8{{$}}
+// AIX64: @o = global %struct.O zeroinitializer, align 8{{$}}
 _Atomic(O) o; // expected-no-diagnostics
 
 typedef struct {
   char x[16];
 } Q;
 
-// PPC: @q = global %struct.Q zeroinitializer, align 1{{$}}
+// PPC32: @q = global %struct.Q zeroinitializer, align 1{{$}}
+// PPC64: @q = global %struct.Q zeroinitializer, align 16{{$}}
+// AIX64: @q = global %struct.Q zeroinitializer, align 1{{$}}
 _Atomic(Q) q; // expected-no-diagnostics
Index: clang/lib/Basic/Targets/PPC.h
===================================================================
--- clang/lib/Basic/Targets/PPC.h
+++ clang/lib/Basic/Targets/PPC.h
@@ -78,6 +78,7 @@
   bool IsISA2_07 = false;
   bool IsISA3_0 = false;
   bool IsISA3_1 = false;
+  bool HasQuadwordAtomics = false;
 
 protected:
   std::string ABI;
@@ -443,6 +444,17 @@
     MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
   }
 
+  void setMaxAtomicWidth() override {
+    // For layout on ELF targets, we support up to 16 bytes.
+    if (getTriple().isOSBinFormatELF())
+      MaxAtomicPromoteWidth = 128;
+
+    // For power8 and up, backend is able to inline 16-byte atomic lock free
+    // code.
+    if (getTriple().isOSBinFormatELF() && hasFeature("quadword-atomics"))
+      MaxAtomicInlineWidth = 128;
+  }
+
   BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::CharPtrBuiltinVaList;
   }
Index: clang/lib/Basic/Targets/PPC.cpp
===================================================================
--- clang/lib/Basic/Targets/PPC.cpp
+++ clang/lib/Basic/Targets/PPC.cpp
@@ -81,6 +81,8 @@
       IsISA3_0 = true;
     } else if (Feature == "+isa-v31-instructions") {
       IsISA3_1 = true;
+    } else if (Feature == "+quadword-atomics") {
+      HasQuadwordAtomics = true;
     }
     // TODO: Finish this list and add an assert that we've handled them
     // all.
@@ -544,6 +546,12 @@
   Features["isa-v30-instructions"] =
       llvm::StringSwitch<bool>(CPU).Case("pwr9", true).Default(false);
 
+  Features["quadword-atomics"] =
+      getTriple().isArch64Bit() && llvm::StringSwitch<bool>(CPU)
+                                       .Case("pwr9", true)
+                                       .Case("pwr8", true)
+                                       .Default(false);
+
   // Power10 includes all the same features as Power9 plus any features specific
   // to the Power10 core.
   if (CPU == "pwr10" || CPU == "power10") {
@@ -654,6 +662,7 @@
       .Case("isa-v207-instructions", IsISA2_07)
       .Case("isa-v30-instructions", IsISA3_0)
       .Case("isa-v31-instructions", IsISA3_1)
+      .Case("quadword-atomics", HasQuadwordAtomics)
       .Default(false);
 }
 
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to