github-actions[bot] wrote:
<!--LLVM CODE FORMAT COMMENT: {clang-format}-->
:warning: C/C++ code formatter, clang-format found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff origin/main HEAD --extensions cpp -- \
  llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp \
  llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp \
  --diff_from_common_commit
``````````
:warning:
The reproduction instructions above might return results for more than one PR
in a stack if you are using a stacked PR workflow. You can limit the results by
changing `origin/main` to the base branch/commit you want to compare against.
:warning:
</details>
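
If the reported issues look right, you can also let `git-clang-format` rewrite the files in place instead of only printing a diff. A minimal sketch, assuming your PR branch is checked out and `origin/main` is the base you want to format against (adjust the base for stacked PRs, as noted above):

``````````bash
# Run git-clang-format without --diff so the changed lines are reformatted
# directly in the working tree, ready to review and commit.
git-clang-format origin/main --extensions cpp -- \
  llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp \
  llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
``````````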
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 0c8472b75..8951ccfbd 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1858,20 +1858,25 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
   case Intrinsic::aarch64_neon_srhadd:
     return LowerBinOp(TargetOpcode::G_SAVGCEIL);
   case Intrinsic::aarch64_neon_sqshrn: {
-    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
-    {
-      // Create right shift instruction. Get v. register the output is written to
-      auto Shr = MIB.buildInstr(AArch64::G_VASHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+      // Create right shift instruction. Get v. register the output is written
+      // to
+      auto Shr = MIB.buildInstr(AArch64::G_VASHR,
+                                {MRI.getType(MI.getOperand(2).getReg())},
+                                {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
-      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr}); MI.eraseFromParent();
+      MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
     }
     break;
   }
   case Intrinsic::aarch64_neon_sqshrun: {
-    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
-    {
-      // Create right shift instruction. Get v. register the output is written to
-      auto Shr = MIB.buildInstr(AArch64::G_VASHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+      // Create right shift instruction. Get v. register the output is written
+      // to
+      auto Shr = MIB.buildInstr(AArch64::G_VASHR,
+                                {MRI.getType(MI.getOperand(2).getReg())},
+                                {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
       MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_U, {MI.getOperand(0)}, {Shr});
       MI.eraseFromParent();
@@ -1879,10 +1884,12 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     break;
   }
   case Intrinsic::aarch64_neon_sqrshrn: {
-    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
-    {
-      // Create right shift instruction. Get v. register the output is written to
-      auto Shr = MIB.buildInstr(AArch64::G_SRSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+      // Create right shift instruction. Get v. register the output is written
+      // to
+      auto Shr = MIB.buildInstr(AArch64::G_SRSHR,
+                                {MRI.getType(MI.getOperand(2).getReg())},
+                                {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
       MIB.buildInstr(TargetOpcode::G_TRUNC_SSAT_S, {MI.getOperand(0)}, {Shr});
       MI.eraseFromParent();
@@ -1890,10 +1897,12 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     break;
   }
   case Intrinsic::aarch64_neon_sqrshrun: {
-    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
-    {
-      // Create right shift instruction. Get v. register the output is written to
-      auto Shr = MIB.buildInstr(AArch64::G_SRSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+      // Create right shift instruction. Get v. register the output is written
+      // to
+      auto Shr = MIB.buildInstr(AArch64::G_SRSHR,
+                                {MRI.getType(MI.getOperand(2).getReg())},
+                                {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
       MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr});
       MI.eraseFromParent();
@@ -1901,10 +1910,12 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     break;
   }
   case Intrinsic::aarch64_neon_uqrshrn: {
-    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
-    {
-      // Create right shift instruction. Get v. register the output is written to
-      auto Shr = MIB.buildInstr(AArch64::G_URSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+      // Create right shift instruction. Get v. register the output is written
+      // to
+      auto Shr = MIB.buildInstr(AArch64::G_URSHR,
+                                {MRI.getType(MI.getOperand(2).getReg())},
+                                {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
       MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr});
       MI.eraseFromParent();
@@ -1912,25 +1923,30 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     break;
   }
   case Intrinsic::aarch64_neon_uqshrn: {
-    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
-    {
-      // Create right shift instruction. Get v. register the output is written to
-      auto Shr = MIB.buildInstr(AArch64::G_VLSHR, {MRI.getType(MI.getOperand(2).getReg())}, {MI.getOperand(2), MI.getOperand(3).getImm()});
+    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+      // Create right shift instruction. Get v. register the output is written
+      // to
+      auto Shr = MIB.buildInstr(AArch64::G_VLSHR,
+                                {MRI.getType(MI.getOperand(2).getReg())},
+                                {MI.getOperand(2), MI.getOperand(3).getImm()});
       // Build the narrow intrinsic, taking in the v. register of the shift
-      MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr}); MI.eraseFromParent();
+      MIB.buildInstr(TargetOpcode::G_TRUNC_USAT_U, {MI.getOperand(0)}, {Shr});
+      MI.eraseFromParent();
     }
     break;
   }
   case Intrinsic::aarch64_neon_sqshlu: {
     // Check if last operand is constant vector dup
-    auto shiftAmount = isConstantOrConstantSplatVector(*MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
+    auto shiftAmount = isConstantOrConstantSplatVector(
+        *MRI.getVRegDef(MI.getOperand(3).getReg()), MRI);
     if (shiftAmount) {
-    // If so, create a new intrinsic with the correct shift amount
-    MIB.buildInstr(AArch64::G_SQSHLU, {MI.getOperand(0)}, {MI.getOperand(2)}).addImm(shiftAmount->getSExtValue());
-    MI.eraseFromParent();
-    return true;
+      // If so, create a new intrinsic with the correct shift amount
+      MIB.buildInstr(AArch64::G_SQSHLU, {MI.getOperand(0)}, {MI.getOperand(2)})
+          .addImm(shiftAmount->getSExtValue());
+      MI.eraseFromParent();
+      return true;
     } else {
-    return false;
+      return false;
     }
   }
   case Intrinsic::aarch64_neon_abs: {
``````````
</details>
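
Alternatively, the diff above can be applied as-is with `git apply`. A small sketch, assuming it has been saved from this message into a file named `clang-format.patch` (a hypothetical name) at the repository root:

``````````bash
# Apply the suggested formatting from the saved patch, then inspect the result
# before committing; paths in the diff are relative to the repository root.
git apply clang-format.patch
git status
``````````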
https://github.com/llvm/llvm-project/pull/170832
_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits