t.p.northover updated this revision to Diff 198452.
t.p.northover added a comment.

Sorry, I managed to forget about this one somehow. I hadn't changed the 32-bit
requirement, but I agree it shouldn't be there, so this diff removes it and
adds tests for the newly legal cases.
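
For context, a minimal sketch of what becomes valid with the restriction
removed (function name and constants are illustrative only; the real coverage
is in the updated Sema and CodeGen tests):

  void minmax_examples(short *s, unsigned long long *u, _Atomic(long long) *a) {
    /* Previously rejected with "must be a pointer to signed or unsigned
       32-bit integer"; now accepted for any integer width. */
    (void)__atomic_fetch_min(s, 1, __ATOMIC_SEQ_CST);
    /* New builtins added by this patch. */
    (void)__atomic_max_fetch(u, 1, __ATOMIC_SEQ_CST);
    (void)__c11_atomic_fetch_max(a, 1, __ATOMIC_SEQ_CST);
  }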


Repository:
  rC Clang

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D55562/new/

https://reviews.llvm.org/D55562

Files:
  clang/docs/LanguageExtensions.rst
  clang/include/clang/Basic/Builtins.def
  clang/include/clang/Basic/DiagnosticSemaKinds.td
  clang/lib/AST/Expr.cpp
  clang/lib/CodeGen/CGAtomic.cpp
  clang/lib/Sema/SemaChecking.cpp
  clang/test/CodeGen/atomic-ops.c
  clang/test/Sema/atomic-ops.c
  clang/test/SemaOpenCL/atomic-ops.cl

Index: clang/test/SemaOpenCL/atomic-ops.cl
===================================================================
--- clang/test/SemaOpenCL/atomic-ops.cl
+++ clang/test/SemaOpenCL/atomic-ops.cl
@@ -73,7 +73,7 @@
   __opencl_atomic_fetch_add(d, 1, memory_order_seq_cst, memory_scope_work_group); // expected-error {{address argument to atomic operation must be a pointer to atomic integer or pointer ('__generic atomic_float *' (aka '__generic _Atomic(float) *') invalid)}}
   __opencl_atomic_fetch_and(i, 1, memory_order_seq_cst, memory_scope_work_group);
   __opencl_atomic_fetch_and(p, 1, memory_order_seq_cst, memory_scope_work_group);
-  __opencl_atomic_fetch_and(d, 1, memory_order_seq_cst, memory_scope_work_group); // expected-error {{address argument to bitwise atomic operation must be a pointer to atomic integer ('__generic atomic_float *' (aka '__generic _Atomic(float) *') invalid)}}
+  __opencl_atomic_fetch_and(d, 1, memory_order_seq_cst, memory_scope_work_group); // expected-error {{address argument to atomic operation must be a pointer to atomic integer ('__generic atomic_float *' (aka '__generic _Atomic(float) *') invalid)}}
 
   __opencl_atomic_fetch_min(i, 1, memory_order_seq_cst, memory_scope_work_group);
   __opencl_atomic_fetch_max(i, 1, memory_order_seq_cst, memory_scope_work_group);
Index: clang/test/Sema/atomic-ops.c
===================================================================
--- clang/test/Sema/atomic-ops.c
+++ clang/test/Sema/atomic-ops.c
@@ -173,8 +173,8 @@
   __atomic_fetch_sub(P, 3, memory_order_seq_cst);
   __atomic_fetch_sub(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
   __atomic_fetch_sub(s1, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
-  __atomic_fetch_min(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to signed or unsigned 32-bit integer}}
-  __atomic_fetch_max(P, 3, memory_order_seq_cst); // expected-error {{must be a pointer to signed or unsigned 32-bit integer}}
+  __atomic_fetch_min(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
+  __atomic_fetch_max(P, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
   __atomic_fetch_max(p, 3);                       // expected-error {{too few arguments to function call, expected 3, have 2}}
 
   __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
@@ -354,6 +354,20 @@
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_acq_rel);
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_seq_cst);
 
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_relaxed);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_acquire);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_consume);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_release);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_acq_rel);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_seq_cst);
+
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_relaxed);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_acquire);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_consume);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_release);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_acq_rel);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_seq_cst);
+
   (void)__c11_atomic_exchange(Ap, val, memory_order_relaxed);
   (void)__c11_atomic_exchange(Ap, val, memory_order_acquire);
   (void)__c11_atomic_exchange(Ap, val, memory_order_consume);
@@ -501,6 +515,20 @@
   (void)__atomic_nand_fetch(p, val, memory_order_acq_rel);
   (void)__atomic_nand_fetch(p, val, memory_order_seq_cst);
 
+  (void)__atomic_max_fetch(p, val, memory_order_relaxed);
+  (void)__atomic_max_fetch(p, val, memory_order_acquire);
+  (void)__atomic_max_fetch(p, val, memory_order_consume);
+  (void)__atomic_max_fetch(p, val, memory_order_release);
+  (void)__atomic_max_fetch(p, val, memory_order_acq_rel);
+  (void)__atomic_max_fetch(p, val, memory_order_seq_cst);
+
+  (void)__atomic_min_fetch(p, val, memory_order_relaxed);
+  (void)__atomic_min_fetch(p, val, memory_order_acquire);
+  (void)__atomic_min_fetch(p, val, memory_order_consume);
+  (void)__atomic_min_fetch(p, val, memory_order_release);
+  (void)__atomic_min_fetch(p, val, memory_order_acq_rel);
+  (void)__atomic_min_fetch(p, val, memory_order_seq_cst);
+
   (void)__atomic_exchange_n(p, val, memory_order_relaxed);
   (void)__atomic_exchange_n(p, val, memory_order_acquire);
   (void)__atomic_exchange_n(p, val, memory_order_consume);
Index: clang/test/CodeGen/atomic-ops.c
===================================================================
--- clang/test/CodeGen/atomic-ops.c
+++ clang/test/CodeGen/atomic-ops.c
@@ -661,4 +661,81 @@
   __atomic_compare_exchange(&aligned_a, &aligned_b, &aligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);
 }
 
+void test_c11_minmax(_Atomic(int) * si, _Atomic(unsigned) * ui, _Atomic(short) * ss, _Atomic(unsigned char) * uc, _Atomic(long long) * sll) {
+  // CHECK-LABEL: @test_c11_minmax
+
+  // CHECK: atomicrmw max i32
+  *si = __c11_atomic_fetch_max(si, 42, memory_order_acquire);
+  // CHECK: atomicrmw min i32
+  *si = __c11_atomic_fetch_min(si, 42, memory_order_acquire);
+  // CHECK: atomicrmw umax i32
+  *ui = __c11_atomic_fetch_max(ui, 42, memory_order_acquire);
+  // CHECK: atomicrmw umin i32
+  *ui = __c11_atomic_fetch_min(ui, 42, memory_order_acquire);
+
+  // CHECK: atomicrmw max i16
+  *ss = __c11_atomic_fetch_max(ss, 42, memory_order_acquire);
+  // CHECK: atomicrmw min i16
+  *ss = __c11_atomic_fetch_min(ss, 42, memory_order_acquire);
+
+  // CHECK: atomicrmw umax i8
+  *uc = __c11_atomic_fetch_max(uc, 42, memory_order_acquire);
+  // CHECK: atomicrmw umin i8
+  *uc = __c11_atomic_fetch_min(uc, 42, memory_order_acquire);
+
+  // CHECK: atomicrmw max i64
+  *sll = __c11_atomic_fetch_max(sll, 42, memory_order_acquire);
+  // CHECK: atomicrmw min i64
+  *sll = __c11_atomic_fetch_min(sll, 42, memory_order_acquire);
+
+}
+
+void test_minmax_postop(int *si, unsigned *ui, unsigned short *us, signed char *sc, unsigned long long *ull) {
+  int val = 42;
+  // CHECK-LABEL: @test_minmax_postop
+
+  // CHECK: [[OLD:%.*]] = atomicrmw max i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp sgt i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *si = __atomic_max_fetch(si, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw min i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp slt i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *si = __atomic_min_fetch(si, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw umax i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp ugt i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *ui = __atomic_max_fetch(ui, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw umin i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp ult i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *ui = __atomic_min_fetch(ui, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw umin i16* [[PTR:%.*]], i16 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp ult i16 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i16 [[OLD]], i16 [[RHS]]
+  // CHECK: store i16 [[NEW]], i16*
+  *us = __atomic_min_fetch(us, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw min i8* [[PTR:%.*]], i8 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp slt i8 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i8 [[OLD]], i8 [[RHS]]
+  // CHECK: store i8 [[NEW]], i8*
+  *sc = __atomic_min_fetch(sc, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(i8* {{%.*}}, i64 [[RHS:%.*]],
+  // CHECK: [[TST:%.*]] = icmp ult i64 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i64 [[OLD]], i64 [[RHS]]
+  // CHECK: store i64 [[NEW]], i64*
+  *ull = __atomic_min_fetch(ull, 42, memory_order_release);
+
+}
+
 #endif
Index: clang/lib/Sema/SemaChecking.cpp
===================================================================
--- clang/lib/Sema/SemaChecking.cpp
+++ clang/lib/Sema/SemaChecking.cpp
@@ -4478,20 +4478,19 @@
       && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
       "need to update code for modified forms");
   static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
-                    AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
+                    AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
                         AtomicExpr::AO__atomic_load,
                 "need to update code for modified C11 atomics");
   bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                   Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
   bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
-               Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
+               Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
                IsOpenCL;
   bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
              Op == AtomicExpr::AO__atomic_store_n ||
              Op == AtomicExpr::AO__atomic_exchange_n ||
              Op == AtomicExpr::AO__atomic_compare_exchange_n;
   bool IsAddSub = false;
-  bool IsMinMax = false;
 
   switch (Op) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -4542,12 +4541,12 @@
   case AtomicExpr::AO__atomic_or_fetch:
   case AtomicExpr::AO__atomic_xor_fetch:
   case AtomicExpr::AO__atomic_nand_fetch:
-    Form = Arithmetic;
-    break;
-
+  case AtomicExpr::AO__c11_atomic_fetch_min:
+  case AtomicExpr::AO__c11_atomic_fetch_max:
+  case AtomicExpr::AO__atomic_min_fetch:
+  case AtomicExpr::AO__atomic_max_fetch:
   case AtomicExpr::AO__atomic_fetch_min:
   case AtomicExpr::AO__atomic_fetch_max:
-    IsMinMax = true;
     Form = Arithmetic;
     break;
 
@@ -4639,16 +4638,8 @@
           << IsC11 << Ptr->getType() << Ptr->getSourceRange();
       return ExprError();
     }
-    if (IsMinMax) {
-      const BuiltinType *BT = ValType->getAs<BuiltinType>();
-      if (!BT || (BT->getKind() != BuiltinType::Int &&
-                  BT->getKind() != BuiltinType::UInt)) {
-        Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr);
-        return ExprError();
-      }
-    }
-    if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
-      Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int)
+    if (!IsAddSub && !ValType->isIntegerType()) {
+      Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int)
           << IsC11 << Ptr->getType() << Ptr->getSourceRange();
       return ExprError();
     }
Index: clang/lib/CodeGen/CGAtomic.cpp
===================================================================
--- clang/lib/CodeGen/CGAtomic.cpp
+++ clang/lib/CodeGen/CGAtomic.cpp
@@ -487,13 +487,36 @@
   CGF.Builder.SetInsertPoint(ContBB);
 }
 
+/// Duplicate the atomic min/max operation in conventional IR for the builtin
+/// variants that return the new rather than the original value.
+static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
+                                         AtomicExpr::AtomicOp Op,
+                                         bool IsSigned,
+                                         llvm::Value *OldVal,
+                                         llvm::Value *RHS) {
+  llvm::CmpInst::Predicate Pred;
+  switch (Op) {
+  default:
+    llvm_unreachable("Unexpected min/max operation");
+  case AtomicExpr::AO__atomic_max_fetch:
+    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
+    break;
+  case AtomicExpr::AO__atomic_min_fetch:
+    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
+    break;
+  }
+  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
+  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
+}
+
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
                          uint64_t Size, llvm::AtomicOrdering Order,
                          llvm::SyncScope::ID Scope) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
-  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+  bool PostOpMinMax = false;
+  unsigned PostOp = 0;
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -587,12 +610,20 @@
     Op = llvm::AtomicRMWInst::Sub;
     break;
 
+  case AtomicExpr::AO__atomic_min_fetch:
+    PostOpMinMax = true;
+    LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_min:
   case AtomicExpr::AO__atomic_fetch_min:
     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                   : llvm::AtomicRMWInst::UMin;
     break;
 
+  case AtomicExpr::AO__atomic_max_fetch:
+    PostOpMinMax = true;
+    LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_max:
   case AtomicExpr::AO__opencl_atomic_fetch_max:
   case AtomicExpr::AO__atomic_fetch_max:
     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
@@ -642,8 +673,13 @@
   // For __atomic_*_fetch operations, perform the operation again to
   // determine the value which was written.
   llvm::Value *Result = RMWI;
-  if (PostOp)
-    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+  if (PostOpMinMax)
+    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
+                                  E->getValueType()->isSignedIntegerType(),
+                                  RMWI, LoadVal1);
+  else if (PostOp)
+    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
+                                     LoadVal1);
   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
     Result = CGF.Builder.CreateNot(Result);
   CGF.Builder.CreateStore(Result, Dest);
@@ -852,6 +888,8 @@
   case AtomicExpr::AO__c11_atomic_fetch_and:
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__c11_atomic_fetch_max:
+  case AtomicExpr::AO__c11_atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_and:
   case AtomicExpr::AO__opencl_atomic_fetch_or:
   case AtomicExpr::AO__opencl_atomic_fetch_xor:
@@ -865,8 +903,10 @@
   case AtomicExpr::AO__atomic_or_fetch:
   case AtomicExpr::AO__atomic_xor_fetch:
   case AtomicExpr::AO__atomic_nand_fetch:
-  case AtomicExpr::AO__atomic_fetch_min:
+  case AtomicExpr::AO__atomic_max_fetch:
+  case AtomicExpr::AO__atomic_min_fetch:
   case AtomicExpr::AO__atomic_fetch_max:
+  case AtomicExpr::AO__atomic_fetch_min:
     Val1 = EmitValToTemp(*this, E->getVal1());
     break;
   }
@@ -915,14 +955,18 @@
     case AtomicExpr::AO__opencl_atomic_fetch_min:
     case AtomicExpr::AO__opencl_atomic_fetch_max:
     case AtomicExpr::AO__atomic_fetch_xor:
+    case AtomicExpr::AO__c11_atomic_fetch_max:
+    case AtomicExpr::AO__c11_atomic_fetch_min:
     case AtomicExpr::AO__atomic_add_fetch:
     case AtomicExpr::AO__atomic_and_fetch:
     case AtomicExpr::AO__atomic_nand_fetch:
     case AtomicExpr::AO__atomic_or_fetch:
     case AtomicExpr::AO__atomic_sub_fetch:
     case AtomicExpr::AO__atomic_xor_fetch:
-    case AtomicExpr::AO__atomic_fetch_min:
     case AtomicExpr::AO__atomic_fetch_max:
+    case AtomicExpr::AO__atomic_fetch_min:
+    case AtomicExpr::AO__atomic_max_fetch:
+    case AtomicExpr::AO__atomic_min_fetch:
       // For these, only library calls for certain sizes exist.
       UseOptimizedLibcall = true;
       break;
@@ -990,6 +1034,7 @@
     QualType RetTy;
     bool HaveRetTy = false;
     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+    bool PostOpMinMax = false;
     switch (E->getOp()) {
     case AtomicExpr::AO__c11_atomic_init:
     case AtomicExpr::AO__opencl_atomic_init:
@@ -1111,6 +1156,10 @@
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                         MemTy, E->getExprLoc(), sizeChars);
       break;
+    case AtomicExpr::AO__atomic_min_fetch:
+      PostOpMinMax = true;
+      LLVM_FALLTHROUGH;
+    case AtomicExpr::AO__c11_atomic_fetch_min:
     case AtomicExpr::AO__atomic_fetch_min:
     case AtomicExpr::AO__opencl_atomic_fetch_min:
       LibCallName = E->getValueType()->isSignedIntegerType()
@@ -1119,6 +1168,10 @@
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                         LoweredMemTy, E->getExprLoc(), sizeChars);
       break;
+    case AtomicExpr::AO__atomic_max_fetch:
+      PostOpMinMax = true;
+      LLVM_FALLTHROUGH;
+    case AtomicExpr::AO__c11_atomic_fetch_max:
     case AtomicExpr::AO__atomic_fetch_max:
     case AtomicExpr::AO__opencl_atomic_fetch_max:
       LibCallName = E->getValueType()->isSignedIntegerType()
@@ -1170,7 +1223,7 @@
     // PostOp is only needed for the atomic_*_fetch operations, and
     // thus is only needed for and implemented in the
     // UseOptimizedLibcall codepath.
-    assert(UseOptimizedLibcall || !PostOp);
+    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
 
     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
     // The value is returned directly from the libcall.
@@ -1181,7 +1234,12 @@
     // provided an out-param.
     if (UseOptimizedLibcall && Res.getScalarVal()) {
       llvm::Value *ResVal = Res.getScalarVal();
-      if (PostOp) {
+      if (PostOpMinMax) {
+        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
+        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
+                                      E->getValueType()->isSignedIntegerType(),
+                                      ResVal, LoadVal1);
+      } else if (PostOp) {
         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
       }
Index: clang/lib/AST/Expr.cpp
===================================================================
--- clang/lib/AST/Expr.cpp
+++ clang/lib/AST/Expr.cpp
@@ -4241,6 +4241,8 @@
   case AO__c11_atomic_fetch_and:
   case AO__c11_atomic_fetch_or:
   case AO__c11_atomic_fetch_xor:
+  case AO__c11_atomic_fetch_max:
+  case AO__c11_atomic_fetch_min:
   case AO__atomic_fetch_add:
   case AO__atomic_fetch_sub:
   case AO__atomic_fetch_and:
@@ -4253,6 +4255,8 @@
   case AO__atomic_or_fetch:
   case AO__atomic_xor_fetch:
   case AO__atomic_nand_fetch:
+  case AO__atomic_min_fetch:
+  case AO__atomic_max_fetch:
   case AO__atomic_fetch_min:
   case AO__atomic_fetch_max:
     return 3;
Index: clang/include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -7134,10 +7134,8 @@
 def err_atomic_op_needs_atomic_int_or_ptr : Error<
   "address argument to atomic operation must be a pointer to %select{|atomic }0"
   "integer or pointer (%1 invalid)">;
-def err_atomic_op_needs_int32_or_ptr : Error<
-  "address argument to atomic operation must be a pointer to signed or unsigned 32-bit integer">;
-def err_atomic_op_bitwise_needs_atomic_int : Error<
-  "address argument to bitwise atomic operation must be a pointer to "
+def err_atomic_op_needs_atomic_int : Error<
+  "address argument to atomic operation must be a pointer to "
   "%select{|atomic }0integer (%1 invalid)">;
 def warn_atomic_op_has_invalid_memory_order : Warning<
   "memory order argument to atomic operation is invalid">,
Index: clang/include/clang/Basic/Builtins.def
===================================================================
--- clang/include/clang/Basic/Builtins.def
+++ clang/include/clang/Basic/Builtins.def
@@ -693,6 +693,8 @@
 ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
 BUILTIN(__c11_atomic_thread_fence, "vi", "n")
 BUILTIN(__c11_atomic_signal_fence, "vi", "n")
 BUILTIN(__c11_atomic_is_lock_free, "iz", "n")
@@ -717,6 +719,8 @@
 ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_max_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_min_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_nand_fetch, "v.", "t")
 BUILTIN(__atomic_test_and_set, "bvD*i", "n")
 BUILTIN(__atomic_clear, "vvD*i", "n")
Index: clang/docs/LanguageExtensions.rst
===================================================================
--- clang/docs/LanguageExtensions.rst
+++ clang/docs/LanguageExtensions.rst
@@ -2155,6 +2155,8 @@
 * ``__c11_atomic_fetch_and``
 * ``__c11_atomic_fetch_or``
 * ``__c11_atomic_fetch_xor``
+* ``__c11_atomic_fetch_max``
+* ``__c11_atomic_fetch_min``
 
 The macros ``__ATOMIC_RELAXED``, ``__ATOMIC_CONSUME``, ``__ATOMIC_ACQUIRE``,
 ``__ATOMIC_RELEASE``, ``__ATOMIC_ACQ_REL``, and ``__ATOMIC_SEQ_CST`` are