labrinea created this revision.
labrinea added reviewers: cfe-commits, t.p.northover, ab, kristof.beyls, 
simon_tatham.
labrinea requested review of this revision.
Herald added a project: clang.

This patch emits the new LLVM IR type introduced in 
https://reviews.llvm.org/D94091 when generating IR for inline assembly 
that operates on `data512_t`, provided that the target hooks report that 
the LS64 extension is available.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D94098

Files:
  clang/include/clang/Basic/TargetInfo.h
  clang/lib/Basic/Targets/AArch64.cpp
  clang/lib/Basic/Targets/AArch64.h
  clang/lib/CodeGen/CGStmt.cpp
  clang/lib/CodeGen/TargetInfo.cpp
  clang/test/CodeGen/aarch64-ls64-inline-asm.c

Index: clang/test/CodeGen/aarch64-ls64-inline-asm.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-ls64-inline-asm.c
@@ -0,0 +1,41 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple aarch64-eabi -target-feature +ls64 -S -emit-llvm -x c %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64_be-eabi -target-feature +ls64 -S -emit-llvm -x c %s -o - | FileCheck %s
+
+struct foo { unsigned long long x[8]; };
+
+// CHECK-LABEL: @load(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[OUTPUT_ADDR:%.*]] = alloca %struct.foo*, align 8
+// CHECK-NEXT:    [[ADDR_ADDR:%.*]] = alloca i8*, align 8
+// CHECK-NEXT:    store %struct.foo* [[OUTPUT:%.*]], %struct.foo** [[OUTPUT_ADDR]], align 8
+// CHECK-NEXT:    store i8* [[ADDR:%.*]], i8** [[ADDR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.foo*, %struct.foo** [[OUTPUT_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ADDR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = call aarch64_ls64 asm sideeffect "ld64b $0,[$1]", "=r,r"(i8* [[TMP1]]) [[ATTR1:#.*]], !srcloc !6
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast %struct.foo* [[TMP0]] to aarch64_ls64*
+// CHECK-NEXT:    store aarch64_ls64 [[TMP2]], aarch64_ls64* [[TMP3]], align 8
+// CHECK-NEXT:    ret void
+//
+void load(struct foo *output, void *addr)
+{
+    __asm__ volatile ("ld64b %0,[%1]" : "=r" (*output) : "r" (addr));
+}
+
+// CHECK-LABEL: @store(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[INPUT_ADDR:%.*]] = alloca %struct.foo*, align 8
+// CHECK-NEXT:    [[ADDR_ADDR:%.*]] = alloca i8*, align 8
+// CHECK-NEXT:    store %struct.foo* [[INPUT:%.*]], %struct.foo** [[INPUT_ADDR]], align 8
+// CHECK-NEXT:    store i8* [[ADDR:%.*]], i8** [[ADDR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.foo*, %struct.foo** [[INPUT_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast %struct.foo* [[TMP0]] to aarch64_ls64*
+// CHECK-NEXT:    [[TMP2:%.*]] = load aarch64_ls64, aarch64_ls64* [[TMP1]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[ADDR_ADDR]], align 8
+// CHECK-NEXT:    call void asm sideeffect "st64b $0,[$1]", "r,r"(aarch64_ls64 [[TMP2]], i8* [[TMP3]]) [[ATTR1]], !srcloc !7
+// CHECK-NEXT:    ret void
+//
+void store(const struct foo *input, void *addr)
+{
+    __asm__ volatile ("st64b %0,[%1]" : : "r" (*input), "r" (addr));
+}
Index: clang/lib/CodeGen/TargetInfo.cpp
===================================================================
--- clang/lib/CodeGen/TargetInfo.cpp
+++ clang/lib/CodeGen/TargetInfo.cpp
@@ -5533,6 +5533,23 @@
     Fn->addFnAttr("branch-target-enforcement",
                   BPI.BranchTargetEnforcement ? "true" : "false");
   }
+
+  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+                                  StringRef Constraint,
+                                  llvm::Type* Ty) const override {
+    if (getABIInfo().getContext().getTargetInfo().hasAArch64_LS64Type()) {
+      if (CGF.CGM.getDataLayout().getTypeSizeInBits(Ty) == 512) {
+        auto *ST = dyn_cast<llvm::StructType>(Ty);
+        if (ST && ST->getNumElements() == 1) {
+          auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
+          if (AT && AT->getNumElements() == 8 &&
+              AT->getElementType()->isIntegerTy(64))
+            return llvm::Type::getAArch64_LS64Ty(CGF.getLLVMContext());
+        }
+      }
+    }
+    return Ty;
+  }
 };
 
 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
Index: clang/lib/CodeGen/CGStmt.cpp
===================================================================
--- clang/lib/CodeGen/CGStmt.cpp
+++ clang/lib/CodeGen/CGStmt.cpp
@@ -2030,6 +2030,7 @@
       Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
     } else {
       llvm::Type *Ty = ConvertType(InputType);
+      Ty = getTargetHooks().adjustInlineAsmType(*this, ConstraintStr, Ty);
       uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
       if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
         Ty = llvm::IntegerType::get(getLLVMContext(), Size);
@@ -2037,6 +2038,11 @@
 
         Arg = Builder.CreateLoad(
             Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
+      } else if (Ty->isAArch64_LS64Ty()) {
+        Ty = llvm::PointerType::getUnqual(Ty);
+
+        Arg = Builder.CreateLoad(
+            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
       } else {
         Arg = InputValue.getPointer(*this);
         ConstraintStr += '*';
@@ -2259,12 +2265,16 @@
       Constraints += "=" + OutputConstraint;
       ResultRegQualTys.push_back(OutExpr->getType());
       ResultRegDests.push_back(Dest);
-      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+      llvm::Type *Ty = ConvertTypeForMem(OutExpr->getType());
+      Ty = getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, Ty);
+      ResultTruncRegTypes.push_back(Ty);
       if (Info.allowsRegister() && isScalarizableAggregate) {
         ResultTypeRequiresCast.push_back(true);
-        unsigned Size = getContext().getTypeSize(OutExpr->getType());
-        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
-        ResultRegTypes.push_back(ConvTy);
+        if (!Ty->isAArch64_LS64Ty()) {
+          unsigned Size = getContext().getTypeSize(OutExpr->getType());
+          Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+        }
+        ResultRegTypes.push_back(Ty);
       } else {
         ResultTypeRequiresCast.push_back(false);
         ResultRegTypes.push_back(ResultTruncRegTypes.back());
@@ -2574,6 +2584,10 @@
       unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
       Address A = Builder.CreateBitCast(Dest.getAddress(*this),
                                         ResultRegTypes[i]->getPointerTo());
+      if (Tmp->getType()->isAArch64_LS64Ty()) {
+        Builder.CreateStore(Tmp, A);
+        continue;
+      }
       QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
       if (Ty.isNull()) {
         const Expr *OutExpr = S.getOutputExpr(i);
Index: clang/lib/Basic/Targets/AArch64.h
===================================================================
--- clang/lib/Basic/Targets/AArch64.h
+++ clang/lib/Basic/Targets/AArch64.h
@@ -134,6 +134,8 @@
   bool hasInt128Type() const override;
 
   bool hasExtIntType() const override { return true; }
+
+  bool hasAArch64_LS64Type() const override { return HasLS64; }
 };
 
 class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo {
Index: clang/lib/Basic/Targets/AArch64.cpp
===================================================================
--- clang/lib/Basic/Targets/AArch64.cpp
+++ clang/lib/Basic/Targets/AArch64.cpp
@@ -705,6 +705,9 @@
       if (Size == 64)
         return true;
 
+      if (Size == 512)
+        return HasLS64;
+
       SuggestedModifier = "w";
       return false;
     }
Index: clang/include/clang/Basic/TargetInfo.h
===================================================================
--- clang/include/clang/Basic/TargetInfo.h
+++ clang/include/clang/Basic/TargetInfo.h
@@ -192,6 +192,7 @@
   bool HasFloat128;
   bool HasFloat16;
   bool HasBFloat16;
+  bool HasAArch64_LS64;
   bool HasStrictFP;
 
   unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
@@ -580,6 +581,8 @@
   /// Determine whether the _BFloat16 type is supported on this target.
   virtual bool hasBFloat16Type() const { return HasBFloat16; }
 
+  virtual bool hasAArch64_LS64Type() const { return HasAArch64_LS64; }
+
   /// Determine whether constrained floating point is supported on this target.
   virtual bool hasStrictFP() const { return HasStrictFP; }
 
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to cfe-commits@lists.llvm.org