This patch implements support for the ACLE special register intrinsics in 
section 10.1, __arm_{w,r}sr{,p,64}.

This includes arm_acle.h definitions, new builtins to support these definitions 
and codegen for them, as well as SemaChecking support to reject invalid 
parameters (as far as is reasonable; the backend will validate this further). 
This unfortunately adds a bit of instruction-level detail into the frontend 
that perhaps shouldn't be there. However, given that these intrinsics take a 
string parameter, I couldn't see how to reasonably pass this string straight 
to the backend and have it interpret what the string should be (which is what 
I would have preferred to do). So instead the builtin codegen interprets the 
string and emits the appropriate inline asm.

http://reviews.llvm.org/D6247

Files:
  include/clang/Basic/BuiltinsAArch64.def
  include/clang/Basic/BuiltinsARM.def
  include/clang/Basic/DiagnosticSemaKinds.td
  include/clang/Sema/Sema.h
  lib/CodeGen/CGBuiltin.cpp
  lib/Headers/arm_acle.h
  lib/Sema/SemaChecking.cpp
  test/CodeGen/arm_acle.c
  test/CodeGen/builtins-arm.c
  test/CodeGen/builtins-arm64.c
  test/Sema/aarch64-special-register.c
  test/Sema/arm-special-register.c
Index: include/clang/Basic/BuiltinsAArch64.def
===================================================================
--- include/clang/Basic/BuiltinsAArch64.def
+++ include/clang/Basic/BuiltinsAArch64.def
@@ -53,4 +53,12 @@
 // Prefetch
 BUILTIN(__builtin_arm_prefetch, "vvC*UiUiUiUi", "nc")
 
+// System registers
+BUILTIN(__builtin_arm_rsr, "UicC*", "nc")
+BUILTIN(__builtin_arm_rsr64, "LUicC*", "nc")
+BUILTIN(__builtin_arm_rsrp, "v*cC*", "nc")
+BUILTIN(__builtin_arm_wsr, "vcC*Ui", "nc")
+BUILTIN(__builtin_arm_wsr64, "vcC*LUi", "nc")
+BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
+
 #undef BUILTIN
Index: include/clang/Basic/BuiltinsARM.def
===================================================================
--- include/clang/Basic/BuiltinsARM.def
+++ include/clang/Basic/BuiltinsARM.def
@@ -57,6 +57,14 @@
 BUILTIN(__builtin_arm_mcrr, "vUiUiUiUiUi", "")
 BUILTIN(__builtin_arm_mcrr2, "vUiUiUiUiUi", "")
 
+// System registers (ACLE)
+BUILTIN(__builtin_arm_rsr, "UicC*", "nc")
+BUILTIN(__builtin_arm_rsr64, "LLUicC*", "nc")
+BUILTIN(__builtin_arm_rsrp, "v*cC*", "nc")
+BUILTIN(__builtin_arm_wsr, "vcC*Ui", "nc")
+BUILTIN(__builtin_arm_wsr64, "vcC*LLUi", "nc")
+BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
+
 // CRC32
 BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc")
 BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc")
Index: include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- include/clang/Basic/DiagnosticSemaKinds.td
+++ include/clang/Basic/DiagnosticSemaKinds.td
@@ -60,6 +60,7 @@
 def ext_expr_not_ice : Extension<
   "expression is not an %select{integer|integral}0 constant expression; "
   "folding it to a constant is a GNU extension">, InGroup<GNUFoldingConstant>;
+def err_expr_not_string_literal : Error<"expression is not a string literal">;
 def err_typecheck_converted_constant_expression : Error<
   "value of type %0 is not implicitly convertible to %1">;
 def err_typecheck_converted_constant_expression_disallowed : Error<
@@ -403,6 +404,7 @@
   "incompatible redeclaration of library function %0">,
   InGroup<DiagGroup<"incompatible-library-redeclaration">>;
 def err_builtin_definition : Error<"definition of builtin function %0">;
+def err_arm_invalid_specialreg : Error<"invalid special register for builtin">;
 def warn_builtin_unknown : Warning<"use of unknown builtin %0">,
   InGroup<ImplicitFunctionDeclare>, DefaultError;
 def warn_dyn_class_memaccess : Warning<
Index: include/clang/Sema/Sema.h
===================================================================
--- include/clang/Sema/Sema.h
+++ include/clang/Sema/Sema.h
@@ -8513,6 +8513,8 @@
                               llvm::APSInt &Result);
   bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                    int Low, int High);
+  bool SemaBuiltinARMSpecialReg(CallExpr *TheCall, int ArgNum,
+                                unsigned ExpectedFieldNum, bool AllowName);
 
 public:
   enum FormatStringType {
Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -22,6 +22,7 @@
 #include "clang/CodeGen/CGFunctionInfo.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/InlineAsm.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -3344,6 +3345,116 @@
     }
   }
 
+  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+      BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+      BuiltinID == ARM::BI__builtin_arm_rsrp ||
+      BuiltinID == ARM::BI__builtin_arm_wsr ||
+      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+      BuiltinID == ARM::BI__builtin_arm_wsrp) {
+    const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
+    StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
+
+    // Split apart the fields of the string so that we can properly pass it
+    // through to the correct system instruction
+    SmallVector<StringRef, 5> Fields;
+    SysReg.split(Fields, ":");
+
+    llvm::Type *RegType;
+    StringRef Opc;
+    bool Is64Bit = false;
+    bool IsRead = false;
+    switch (BuiltinID) {
+    case ARM::BI__builtin_arm_rsr:
+      IsRead = true; // Fallthrough
+    case ARM::BI__builtin_arm_wsr:
+      RegType = Int32Ty;
+      break;
+
+    case ARM::BI__builtin_arm_rsrp:
+      IsRead = true; // Fallthrough
+    case ARM::BI__builtin_arm_wsrp:
+      RegType = VoidPtrTy;
+      break;
+
+    case ARM::BI__builtin_arm_rsr64:
+      IsRead = true;
+      Is64Bit = true;
+      RegType = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
+      break;
+    case ARM::BI__builtin_arm_wsr64:
+      Is64Bit = true;
+      RegType = Int32Ty;
+      break;
+
+    default:
+      llvm_unreachable("Unexpected builtin");
+    }
+
+    // If we have 5 fields then map to an MCR/MRC instruction with 32-bit
+    // input/output.
+    // If we have 3 fields then map to an MCRR/MRRC instruction with 64-bit
+    // input/output.
+    // Otherwise map to an MSR/MRS instruction with 32-bit input/output.
+    // We can safely assume we are one of these cases since SemaChecking checks
+    // this.
+    StringRef AsmString;
+    if (Fields.size() == 5)
+      AsmString = Twine((IsRead ? "mrc " : "mcr ") + Fields[0] + ", " +
+                        Fields[1] + ", $0, " + Fields[2] + ", " + Fields[3] +
+                        ", " + Fields[4]).str();
+    else if (Fields.size() == 3)
+      AsmString = Twine((IsRead ? "mrrc " : "mcrr ") + Fields[0] + ", " +
+                        Fields[1] + ", $0, $1, " + Fields[2]).str();
+    else if (IsRead)
+      AsmString = Twine("mrs $0, " + SysReg).str();
+    else
+      AsmString = Twine("msr " + SysReg + ", $0").str();
+
+    std::vector<llvm::Type *> ArgTypes;
+    if (IsRead) {
+      // Emit the read version of our instruction (MRC/MRRC/MRS)
+      llvm::FunctionType *FTy =
+          llvm::FunctionType::get(RegType, ArgTypes, false);
+      llvm::InlineAsm *IA =
+          llvm::InlineAsm::get(FTy, AsmString, Is64Bit ? "=r,=r" : "=r", true,
+                               false, llvm::InlineAsm::AD_ATT);
+
+      if (Is64Bit) {
+        // Stitch together the two 32-bit outputs if we are a 64-bit output
+        Value *IARet = Builder.CreateCall(IA);
+        Value *HiIARet =
+            Builder.CreateZExt(Builder.CreateExtractValue(IARet, 1), Int64Ty);
+        Value *LoIARet =
+            Builder.CreateZExt(Builder.CreateExtractValue(IARet, 0), Int64Ty);
+        HiIARet = Builder.CreateShl(HiIARet, 32);
+        return Builder.CreateOr(HiIARet, LoIARet);
+      } else
+        return Builder.CreateCall(IA);
+    } else {
+      // Emit the write version of our instruction (MCR/MCRR/MSR)
+      ArgTypes.push_back(RegType);
+      if (Is64Bit)
+        ArgTypes.push_back(RegType);
+
+      llvm::FunctionType *FTy =
+          llvm::FunctionType::get(VoidTy, ArgTypes, false);
+      llvm::InlineAsm *IA =
+          llvm::InlineAsm::get(FTy, AsmString, Is64Bit ? "r,r" : "r", true,
+                               false, llvm::InlineAsm::AD_ATT);
+
+      if (Is64Bit) {
+        // Pull apart a 64-bit input to pass as two 32-bit registers
+        Value *Arg = EmitScalarExpr(E->getArg(1));
+        Value *HiArg = Builder.CreateLShr(Arg, 32);
+        HiArg = Builder.CreateTrunc(HiArg, Int32Ty);
+        Value *LoArg = Builder.CreateTrunc(Arg, Int32Ty);
+
+        return Builder.CreateCall2(IA, LoArg, HiArg);
+      } else
+        return Builder.CreateCall(IA, EmitScalarExpr(E->getArg(1)));
+    }
+  }
+
   SmallVector<Value*, 4> Ops;
   llvm::Value *Align = nullptr;
   for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
@@ -4073,6 +4184,68 @@
     return Builder.CreateCall(F);
   }
 
+  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+      BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+      BuiltinID == AArch64::BI__builtin_arm_wsrp) {
+    const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
+    StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
+
+    // If the sysreg string is of form o0:op1:CRn:CRm:op2, then we are
+    // referencing a system register by encoding, reformat to
+    // S<o0>_<op1>_C<CRn>_C<CRm>_<op2> which is the real format of such system
+    // registers.
+    SmallVector<StringRef, 5> Fields;
+    SysReg.split(Fields, ":");
+    uint32_t O0, Op1, Op2, CRn, CRm;
+    if (Fields.size() == 5 && !Fields[0].getAsInteger(10, O0) &&
+        !Fields[1].getAsInteger(10, Op1) && !Fields[2].getAsInteger(10, CRn) &&
+        !Fields[3].getAsInteger(10, CRm) && !Fields[4].getAsInteger(10, Op2)) {
+      SysReg = Twine("S" + Twine(O0) + "_" + Twine(Op1) + "_C" + Twine(CRn) +
+                     "_C" + Twine(CRm) + "_" + Twine(Op2)).str();
+    }
+
+    llvm::Type *RegType;
+    switch (BuiltinID) {
+    case AArch64::BI__builtin_arm_rsr:
+    case AArch64::BI__builtin_arm_wsr:
+      RegType = Int32Ty;
+      break;
+    case AArch64::BI__builtin_arm_rsr64:
+    case AArch64::BI__builtin_arm_wsr64:
+      RegType = Int64Ty;
+      break;
+    case AArch64::BI__builtin_arm_rsrp:
+    case AArch64::BI__builtin_arm_wsrp:
+      RegType = VoidPtrTy;
+      break;
+    default:
+      llvm_unreachable("Unexpected builtin");
+    }
+
+    std::vector<llvm::Type *> ArgTypes;
+    if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+        BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+        BuiltinID == AArch64::BI__builtin_arm_rsrp) {
+      llvm::FunctionType *FTy =
+          llvm::FunctionType::get(RegType, ArgTypes, false);
+      llvm::InlineAsm *IA =
+          llvm::InlineAsm::get(FTy, Twine("mrs $0, " + SysReg).str(), "=r",
+                               true, false, llvm::InlineAsm::AD_ATT);
+      return Builder.CreateCall(IA);
+    } else {
+      ArgTypes.push_back(RegType);
+      llvm::FunctionType *FTy =
+          llvm::FunctionType::get(VoidTy, ArgTypes, false);
+      llvm::InlineAsm *IA =
+          llvm::InlineAsm::get(FTy, Twine("msr " + SysReg + ", $0").str(), "r",
+                               true, false, llvm::InlineAsm::AD_ATT);
+      return Builder.CreateCall(IA, EmitScalarExpr(E->getArg(1)));
+    }
+  }
+
   // CRC32
   Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
   switch (BuiltinID) {
Index: lib/Headers/arm_acle.h
===================================================================
--- lib/Headers/arm_acle.h
+++ lib/Headers/arm_acle.h
@@ -289,6 +289,14 @@
 }
 #endif
 
+/* 10.1 Special register intrinsics */
+#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
+#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
+#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+
 #if defined(__cplusplus)
 }
 #endif
Index: lib/Sema/SemaChecking.cpp
===================================================================
--- lib/Sema/SemaChecking.cpp
+++ lib/Sema/SemaChecking.cpp
@@ -695,6 +695,16 @@
       SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
   }
 
+  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+      BuiltinID == ARM::BI__builtin_arm_wsr64)
+    return SemaBuiltinARMSpecialReg(TheCall, 0, 3, false);
+
+  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+      BuiltinID == ARM::BI__builtin_arm_rsrp ||
+      BuiltinID == ARM::BI__builtin_arm_wsr ||
+      BuiltinID == ARM::BI__builtin_arm_wsrp)
+    return SemaBuiltinARMSpecialReg(TheCall, 0, 5, true);
+
   if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
     return true;
 
@@ -735,6 +745,16 @@
       SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
   }
 
+  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr64)
+    return SemaBuiltinARMSpecialReg(TheCall, 0, 5, false);
+
+  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr ||
+      BuiltinID == AArch64::BI__builtin_arm_wsrp)
+    return SemaBuiltinARMSpecialReg(TheCall, 0, 5, true);
+
   if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
     return true;
 
@@ -2289,6 +2309,32 @@
   return false;
 }
 
+/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
+/// TheCall is an ARM/AArch64 special register string literal.
+bool Sema::SemaBuiltinARMSpecialReg(CallExpr *TheCall, int ArgNum,
+                                    unsigned ExpectedFieldNum, bool AllowName) {
+  // We can't check the value of a dependent argument.
+  Expr *Arg = TheCall->getArg(ArgNum);
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  // Check if the argument is a string literal
+  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+    return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+           << Arg->getSourceRange();
+
+  // Check the type of special register given
+  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+  SmallVector<StringRef, 6> Fields;
+  Reg.split(Fields, ":");
+
+  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
+    return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+           << Arg->getSourceRange();
+
+  return false;
+}
+
 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
 /// This checks that val is a constant 1.
 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
Index: test/CodeGen/arm_acle.c
===================================================================
--- test/CodeGen/arm_acle.c
+++ test/CodeGen/arm_acle.c
@@ -77,7 +77,7 @@
 // AArch64: call i64 @llvm.aarch64.ldxr
 // AArch64: call i32 @llvm.aarch64.stxr
 uint32_t test_swp(uint32_t x, volatile void *p) {
-  __swp(x, p);
+  return __swp(x, p);
 }
 
 /* 8.6 Memory prefetch intrinsics */
@@ -336,3 +336,54 @@
 uint32_t test_crc32cd(uint32_t a, uint64_t b) {
   return __crc32cd(a, b);
 }
+
+/* 10.1 Special register intrinsics */
+// ARM-LABEL: test_rsr
+// AArch64: call i32 asm {{.*}} "mrs $0, S1_2_C3_C4_5", "=r"()
+// AArch32: call i32 asm {{.*}} "mrc 1, 2, $0, 3, 4, 5", "=r"()
+uint32_t test_rsr() {
+  return __arm_rsr("1:2:3:4:5");
+}
+
+// ARM-LABEL: test_rsr64
+// AArch64: call i64 asm {{.*}} "mrs $0, S1_2_C3_C4_5", "=r"()
+// AArch32: call { i32, i32 } asm {{.*}} "mrrc 1, 2, $0, $1, 3", "=r,=r"()
+uint64_t test_rsr64() {
+#ifdef __ARM_32BIT_STATE
+  return __arm_rsr64("1:2:3");
+#else
+  return __arm_rsr64("1:2:3:4:5");
+#endif
+}
+
+// ARM-LABEL: test_rsrp
+// AArch64: call i8* asm {{.*}} "mrs $0, sysreg", "=r"()
+// AArch32: call i8* asm {{.*}} "mrs $0, sysreg", "=r"()
+void *test_rsrp() {
+  return __arm_rsrp("sysreg");
+}
+
+// ARM-LABEL: test_wsr
+// AArch64: call void asm {{.*}} "msr S1_2_C3_C4_5, $0", "r"(i32 %v)
+// AArch32: call void asm {{.*}} "mcr 1, 2, $0, 3, 4, 5", "r"(i32 %v)
+void test_wsr(uint32_t v) {
+  __arm_wsr("1:2:3:4:5", v);
+}
+
+// ARM-LABEL: test_wsr64
+// AArch64: call void asm {{.*}} "msr S1_2_C3_C4_5, $0", "r"(i64 %v)
+// AArch32: call void asm {{.*}} "mcrr 1, 2, $0, $1, 3", "r,r"(i32 {{.*}}, i32 {{.*}})
+void test_wsr64(uint64_t v) {
+#ifdef __ARM_32BIT_STATE
+  __arm_wsr64("1:2:3", v);
+#else
+  __arm_wsr64("1:2:3:4:5", v);
+#endif
+}
+
+// ARM-LABEL: test_wsrp
+// AArch64: call void asm {{.*}} "msr sysreg, $0", "r"(i8* %v)
+// AArch32: call void asm {{.*}} "msr sysreg, $0", "r"(i8* %v)
+void test_wsrp(void *v) {
+  __arm_wsrp("sysreg", v);
+}
Index: test/CodeGen/builtins-arm.c
===================================================================
--- test/CodeGen/builtins-arm.c
+++ test/CodeGen/builtins-arm.c
@@ -84,3 +84,38 @@
   __builtin_arm_prefetch(&i, 1, 0);
 // CHECK: call {{.*}} @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 0)
 }
+
+unsigned rsr() {
+  // CHECK: call i32 asm {{.*}} "mrc 1, 2, $0, 3, 4, 5", "=r"()
+  return __builtin_arm_rsr("1:2:3:4:5");
+}
+unsigned long long rsr64() {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = tail call { i32, i32 } asm {{.*}} "mrrc 1, 2, $0, $1, 3", "=r,=r"()
+  // CHECK-NEXT: [[V1:[%A-Za-z0-9.]+]] = extractvalue { i32, i32 } [[V0]], 1
+  // CHECK-NEXT: [[V2:[%A-Za-z0-9.]+]] = zext i32 [[V1]] to i64
+  // CHECK-NEXT: [[V3:[%A-Za-z0-9.]+]] = extractvalue { i32, i32 } [[V0]], 0
+  // CHECK-NEXT: [[V4:[%A-Za-z0-9.]+]] = zext i32 [[V3]] to i64
+  // CHECK-NEXT: [[V5:[%A-Za-z0-9.]+]] = shl nuw i64 [[V2]], 32
+  // CHECK-NEXT: [[V6:[%A-Za-z0-9.]+]] = or i64 [[V5]], [[V4]]
+  // CHECK-NEXT: ret i64 [[V6]]
+  return __builtin_arm_rsr64("1:2:3");
+}
+void *rsrp() {
+  // CHECK: call i8* asm {{.*}} "mrs $0, sysreg", "=r"()
+  return __builtin_arm_rsrp("sysreg");
+}
+void wsr(unsigned v) {
+  // CHECK: call void asm {{.*}} "mcr 1, 2, $0, 3, 4, 5", "r"(i32 %v)
+  __builtin_arm_wsr("1:2:3:4:5", v);
+}
+void wsr64(unsigned long long v) {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = lshr i64 %v, 32
+  // CHECK-NEXT: [[V1:[%A-Za-z0-9.]+]] = trunc i64 [[V0]] to i32
+  // CHECK-NEXT: [[V2:[%A-Za-z0-9.]+]] = trunc i64 %v to i32
+  // CHECK-NEXT: call void asm {{.*}} "mcrr 1, 2, $0, $1, 3", "r,r"(i32 [[V2]], i32 [[V1]])
+  __builtin_arm_wsr64("1:2:3", v);
+}
+void wsrp(void *v) {
+  // CHECK: call void asm {{.*}} "msr sysreg, $0", "r"(i8* %v)
+  __builtin_arm_wsrp("sysreg", v);
+}
Index: test/CodeGen/builtins-arm64.c
===================================================================
--- test/CodeGen/builtins-arm64.c
+++ test/CodeGen/builtins-arm64.c
@@ -43,3 +43,28 @@
   __builtin_arm_prefetch(0, 0, 0, 0, 0); // plil1keep
 // CHECK: call {{.*}} @llvm.prefetch(i8* null, i32 0, i32 3, i32 0)
 }
+
+unsigned rsr() {
+  // CHECK: call i32 asm {{.*}} "mrs $0, S1_2_C3_C4_5", "=r"()
+  return __builtin_arm_rsr("1:2:3:4:5");
+}
+unsigned long rsr64() {
+  // CHECK: call i64 asm {{.*}} "mrs $0, S1_2_C3_C4_5", "=r"()
+  return __builtin_arm_rsr64("1:2:3:4:5");
+}
+void *rsrp() {
+  // CHECK: call i8* asm {{.*}} "mrs $0, S1_2_C3_C4_5", "=r"()
+  return __builtin_arm_rsrp("1:2:3:4:5");
+}
+void wsr(unsigned v) {
+  // CHECK: call void asm {{.*}} "msr S1_2_C3_C4_5, $0", "r"(i32 %v)
+  __builtin_arm_wsr("1:2:3:4:5", v);
+}
+void wsr64(unsigned long v) {
+  // CHECK: call void asm {{.*}} "msr S1_2_C3_C4_5, $0", "r"(i64 %v)
+  __builtin_arm_wsr64("1:2:3:4:5", v);
+}
+void wsrp(void *v) {
+  // CHECK: call void asm {{.*}} "msr S1_2_C3_C4_5, $0", "r"(i8* %v)
+  __builtin_arm_wsrp("1:2:3:4:5", v);
+}
Index: test/Sema/aarch64-special-register.c
===================================================================
--- /dev/null
+++ test/Sema/aarch64-special-register.c
@@ -0,0 +1,77 @@
+// RUN: %clang_cc1 -ffreestanding -fsyntax-only -verify -triple aarch64 %s
+
+void string_literal(unsigned v) {
+  __builtin_arm_wsr(0, v); // expected-error {{expression is not a string literal}}
+}
+
+void wsr_1(unsigned v) {
+  __builtin_arm_wsr("sysreg", v);
+}
+
+void wsrp_1(void *v) {
+  __builtin_arm_wsrp("sysreg", v);
+}
+
+void wsr64_1(unsigned long v) {
+  __builtin_arm_wsr64("sysreg", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_1() {
+  return __builtin_arm_rsr("sysreg");
+}
+
+void *rsrp_1() {
+  return __builtin_arm_rsrp("sysreg");
+}
+
+unsigned long rsr64_1() {
+  return __builtin_arm_rsr64("sysreg"); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr_2(unsigned v) {
+  __builtin_arm_wsr("0:1:2:3:4", v);
+}
+
+void wsrp_2(void *v) {
+  __builtin_arm_wsrp("0:1:2:3:4", v);
+}
+
+void wsr64_2(unsigned long v) {
+  __builtin_arm_wsr64("0:1:2:3:4", v);
+}
+
+unsigned rsr_2() {
+  return __builtin_arm_rsr("0:1:2:3:4");
+}
+
+void *rsrp_2() {
+  return __builtin_arm_rsrp("0:1:2:3:4");
+}
+
+unsigned long rsr64_2() {
+  return __builtin_arm_rsr64("0:1:2:3:4");
+}
+
+void wsr_3(unsigned v) {
+  __builtin_arm_wsr("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsrp_3(void *v) {
+  __builtin_arm_wsrp("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr64_3(unsigned long v) {
+  __builtin_arm_wsr64("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_3() {
+  return __builtin_arm_rsr("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
+
+void *rsrp_3() {
+  return __builtin_arm_rsrp("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned long rsr64_3() {
+  return __builtin_arm_rsr64("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
Index: test/Sema/arm-special-register.c
===================================================================
--- /dev/null
+++ test/Sema/arm-special-register.c
@@ -0,0 +1,77 @@
+// RUN: %clang_cc1 -ffreestanding -fsyntax-only -verify -triple arm %s
+
+void string_literal(unsigned v) {
+  __builtin_arm_wsr(0, v); // expected-error {{expression is not a string literal}}
+}
+
+void wsr_1(unsigned v) {
+  __builtin_arm_wsr("sysreg", v);
+}
+
+void wsrp_1(void *v) {
+  __builtin_arm_wsrp("sysreg", v);
+}
+
+void wsr64_1(unsigned long v) {
+  __builtin_arm_wsr64("sysreg", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_1() {
+  return __builtin_arm_rsr("sysreg");
+}
+
+void *rsrp_1() {
+  return __builtin_arm_rsrp("sysreg");
+}
+
+unsigned long rsr64_1() {
+  return __builtin_arm_rsr64("sysreg"); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr_2(unsigned v) {
+  __builtin_arm_wsr("0:1:2:3:4", v);
+}
+
+void wsrp_2(void *v) {
+  __builtin_arm_wsrp("0:1:2:3:4", v);
+}
+
+void wsr64_2(unsigned long v) {
+  __builtin_arm_wsr64("0:1:2:3:4", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_2() {
+  return __builtin_arm_rsr("0:1:2:3:4");
+}
+
+void *rsrp_2() {
+  return __builtin_arm_rsrp("0:1:2:3:4");
+}
+
+unsigned long rsr64_2() {
+  return __builtin_arm_rsr64("0:1:2:3:4"); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr_3(unsigned v) {
+  __builtin_arm_wsr("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsrp_3(void *v) {
+  __builtin_arm_wsrp("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr64_3(unsigned long v) {
+  __builtin_arm_wsr64("0:1:2", v);
+}
+
+unsigned rsr_3() {
+  return __builtin_arm_rsr("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
+
+void *rsrp_3() {
+  return __builtin_arm_rsrp("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned long rsr64_3() {
+  return __builtin_arm_rsr64("0:1:2");
+}
_______________________________________________
cfe-commits mailing list
[email protected]
http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits

Reply via email to