https://github.com/tstellar updated https://github.com/llvm/llvm-project/pull/92143

>From a7cd0c61123889a632ceea67dc8c8e2c8753ae08 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.top...@sifive.com>
Date: Thu, 16 May 2024 12:27:05 -0700
Subject: [PATCH] [RISCV] Add an unaligned-scalar-mem feature like we had in
 clang 17.

This is ORed with the fast-unaligned-access feature, which applies to
scalar and vector accesses together.
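
For example, either of the following enables unaligned scalar loads and
stores (a usage sketch mirroring the RUN lines added to the tests below;
the input file name is illustrative):

  llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem input.ll
  llc -mtriple=riscv64 -mattr=+fast-unaligned-access input.ll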
---
 llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp | 5 +++--
 llvm/lib/Target/RISCV/RISCVFeatures.td           | 5 +++++
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp      | 9 ++++++---
 llvm/test/CodeGen/RISCV/memcpy-inline.ll         | 4 ++++
 llvm/test/CodeGen/RISCV/memcpy.ll                | 4 ++++
 llvm/test/CodeGen/RISCV/memset-inline.ll         | 4 ++++
 llvm/test/CodeGen/RISCV/pr56110.ll               | 1 +
 llvm/test/CodeGen/RISCV/unaligned-load-store.ll  | 4 ++++
 8 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 0a314fdd41cbe..89207640ee54a 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -317,8 +317,9 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
       .addReg(MBBI->getOperand(1).getReg())
       .add(MBBI->getOperand(2));
   if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
-    // FIXME: Zdinx RV32 can not work on unaligned memory.
-    assert(!STI->hasFastUnalignedAccess());
+    // FIXME: Zdinx RV32 can not work on unaligned scalar memory.
+    assert(!STI->hasFastUnalignedAccess() &&
+           !STI->enableUnalignedScalarMem());
 
     assert(MBBI->getOperand(2).getOffset() % 8 == 0);
     MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4);
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 26451c80f57b4..1bb6b6a561f4a 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1025,6 +1025,11 @@ def FeatureFastUnalignedAccess
                       "true", "Has reasonably performant unaligned "
                       "loads and stores (both scalar and vector)">;
 
+def FeatureUnalignedScalarMem
+   : SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem",
+                      "true", "Has reasonably performant unaligned scalar "
+                      "loads and stores">;
+
 def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler",
     "UsePostRAScheduler", "true", "Schedule again after register allocation">;
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d46093b9e260a..3fe7ddfdd4279 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1883,7 +1883,8 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   // replace. If we don't support unaligned scalar mem, prefer the constant
   // pool.
   // TODO: Can the caller pass down the alignment?
-  if (!Subtarget.hasFastUnalignedAccess())
+  if (!Subtarget.hasFastUnalignedAccess() &&
+      !Subtarget.enableUnalignedScalarMem())
     return true;
 
   // Prefer to keep the load if it would require many instructions.
@@ -19772,8 +19773,10 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
     unsigned *Fast) const {
   if (!VT.isVector()) {
     if (Fast)
-      *Fast = Subtarget.hasFastUnalignedAccess();
-    return Subtarget.hasFastUnalignedAccess();
+      *Fast = Subtarget.hasFastUnalignedAccess() ||
+              Subtarget.enableUnalignedScalarMem();
+    return Subtarget.hasFastUnalignedAccess() ||
+           Subtarget.enableUnalignedScalarMem();
   }
 
   // All vector implementations must support element alignment
diff --git a/llvm/test/CodeGen/RISCV/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/memcpy-inline.ll
index 343695ee37da8..709b8264b5833 100644
--- a/llvm/test/CodeGen/RISCV/memcpy-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy-inline.ll
@@ -7,6 +7,10 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 
 ; ----------------------------------------------------------------------
 ; Fully unaligned cases
diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll
index 12ec0881b20d9..f8f5d25947d7f 100644
--- a/llvm/test/CodeGen/RISCV/memcpy.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy.ll
@@ -7,6 +7,10 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+fast-unaligned-access \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
+; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 
 @src = external dso_local global %struct.x
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index cc22b77c641e2..e131a3480c89e 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -7,6 +7,10 @@
 ; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+fast-unaligned-access \
 ; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
+; RUN:   | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST
 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
 
 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/RISCV/pr56110.ll b/llvm/test/CodeGen/RISCV/pr56110.ll
index c795b17419f56..fcf3f691d56e0 100644
--- a/llvm/test/CodeGen/RISCV/pr56110.ll
+++ b/llvm/test/CodeGen/RISCV/pr56110.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=riscv32 | FileCheck %s
 ; RUN: llc < %s -mtriple=riscv32 -mattr=+fast-unaligned-access | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem | FileCheck %s
 
 define void @foo_set(ptr nocapture noundef %a, i32 noundef %v) {
 ; CHECK-LABEL: foo_set:
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index 599b0d08629ea..641686dce97f6 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -7,6 +7,10 @@
 ; RUN:   | FileCheck -check-prefixes=ALL,FAST,RV32I-FAST %s
 ; RUN: llc -mtriple=riscv64 -mattr=+fast-unaligned-access -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefixes=ALL,FAST,RV64I-FAST %s
+; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ALL,FAST,RV32I-FAST %s
+; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ALL,FAST,RV64I-FAST %s
 
 ; A collection of cases showing codegen for unaligned loads and stores
 

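For local experimentation, a minimal input in the style of
llvm/test/CodeGen/RISCV/unaligned-load-store.ll (the function below is
illustrative, not taken from the test file):

  ; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s
  define i32 @load_i32_align1(ptr %p) {
    ; With +unaligned-scalar-mem (or +fast-unaligned-access) this should lower
    ; to a single lw rather than a sequence of byte loads, shifts, and ors.
    %v = load i32, ptr %p, align 1
    ret i32 %v
  }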