[llvm-branch-commits] [llvm] ad25f8a - X86InstrInfo: Support immediates that are +1/-1 different in optimizeCompareInstr

2022-01-11 Thread Matthias Braun via llvm-branch-commits

Author: Matthias Braun
Date: 2022-01-11T09:07:29-08:00
New Revision: ad25f8a556d239d8b7d17383cf1a0771359521fd

URL: 
https://github.com/llvm/llvm-project/commit/ad25f8a556d239d8b7d17383cf1a0771359521fd
DIFF: 
https://github.com/llvm/llvm-project/commit/ad25f8a556d239d8b7d17383cf1a0771359521fd.diff

LOG: X86InstrInfo: Support immediates that are +1/-1 different in 
optimizeCompareInstr

This is a re-commit of e2c7ee0743592e39274e28dbe0d0c213ba342317 which
was reverted in a2a58d91e82db38fbdf88cc317dcb3753d79d492 and
ea81cea8163a1a0e54df42103ee1c657bbf03791. This includes a fix to
consistently check for EFLAGS being live-out. See phabricator
review.

Original Summary:

This extends `optimizeCompareInstr` to re-use previous comparison
results if the previous comparison was with an immediate that was 1
bigger or smaller. Example:

CMP x, 13
...
CMP x, 12   ; can be removed if we change the SETg
SETg ...; x > 12  changed to `SETge` (x >= 13) removing CMP

Motivation: This often happens because SelectionDAG canonicalization
tends to add/subtract 1 often when optimizing for fallthrough blocks.
Example for `x > C` the fallthrough optimization switches true/false
blocks with `!(x > C)` --> `x <= C` and canonicalization turns this into
`x < C + 1`.

Differential Revision: https://reviews.llvm.org/D110867

Added: 
llvm/test/CodeGen/X86/peep-test-5.ll

Modified: 
llvm/lib/Target/X86/X86InstrInfo.cpp
llvm/lib/Target/X86/X86InstrInfo.h
llvm/test/CodeGen/X86/optimize-compare.mir
llvm/test/CodeGen/X86/use-cr-result-of-dom-icmp-st.ll

Removed: 




diff  --git a/llvm/lib/Target/X86/X86InstrInfo.cpp 
b/llvm/lib/Target/X86/X86InstrInfo.cpp
index c379aa8d9258..4dcd886fa3b2 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4088,8 +4088,8 @@ bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, 
Register &SrcReg,
 bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
 Register SrcReg, Register SrcReg2,
 int64_t ImmMask, int64_t ImmValue,
-const MachineInstr &OI,
-bool *IsSwapped) const {
+const MachineInstr &OI, bool 
*IsSwapped,
+int64_t *ImmDelta) const {
   switch (OI.getOpcode()) {
   case X86::CMP64rr:
   case X86::CMP32rr:
@@ -4140,10 +4140,21 @@ bool X86InstrInfo::isRedundantFlagInstr(const 
MachineInstr &FlagI,
   int64_t OIMask;
   int64_t OIValue;
   if (analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) &&
-  SrcReg == OISrcReg && ImmMask == OIMask && OIValue == ImmValue) {
-assert(SrcReg2 == X86::NoRegister && OISrcReg2 == X86::NoRegister &&
-   "should not have 2nd register");
-return true;
+  SrcReg == OISrcReg && ImmMask == OIMask) {
+if (OIValue == ImmValue) {
+  *ImmDelta = 0;
+  return true;
+} else if (static_cast<uint64_t>(ImmValue) ==
+   static_cast<uint64_t>(OIValue) - 1) {
+  *ImmDelta = -1;
+  return true;
+} else if (static_cast<uint64_t>(ImmValue) ==
+   static_cast<uint64_t>(OIValue) + 1) {
+  *ImmDelta = 1;
+  return true;
+} else {
+  return false;
+}
   }
 }
 return FlagI.isIdenticalTo(OI);
@@ -4393,6 +4404,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr 
&CmpInstr, Register SrcReg,
   bool ShouldUpdateCC = false;
   bool IsSwapped = false;
   X86::CondCode NewCC = X86::COND_INVALID;
+  int64_t ImmDelta = 0;
 
   // Search backward from CmpInstr for the next instruction defining EFLAGS.
   const TargetRegisterInfo *TRI = &getRegisterInfo();
@@ -4439,7 +4451,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr 
&CmpInstr, Register SrcReg,
 // ...   // EFLAGS not changed
 // cmp x, y  // <-- can be removed
 if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
- Inst, &IsSwapped)) {
+ Inst, &IsSwapped, &ImmDelta)) {
   Sub = &Inst;
   break;
 }
@@ -4473,7 +4485,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr 
&CmpInstr, Register SrcReg,
   // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
   // If we are done with the basic block, we need to check whether EFLAGS is
   // live-out.
-  bool IsSafe = false;
+  bool FlagsMayLiveOut = true;
  SmallVector<std::pair<MachineInstr *, X86::CondCode>, 4> OpsToUpdate;
   MachineBasicBlock::iterator AfterCmpInstr =
   std::next(MachineBasicBlock::iterator(CmpInstr));
@@ -4483,7 +4495,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr 
&CmpInstr, Register SrcReg,
 // We should check the usage if this instruction uses and 

[llvm-branch-commits] [llvm] eecd0f0 - Have lit preserve SOURCE_DATE_EPOCH

2022-01-11 Thread Tom Stellard via llvm-branch-commits

Author: serge-sans-paille
Date: 2022-01-11T18:23:09-08:00
New Revision: eecd0f055baa46d1edc57cd8df0c451cefb70359

URL: 
https://github.com/llvm/llvm-project/commit/eecd0f055baa46d1edc57cd8df0c451cefb70359
DIFF: 
https://github.com/llvm/llvm-project/commit/eecd0f055baa46d1edc57cd8df0c451cefb70359.diff

LOG: Have lit preserve SOURCE_DATE_EPOCH

This environment variable has been standardized for reproducible builds. Setting
it can help to have reproducible tests too, so keep it as part of the testing
env when set.

See https://reproducible-builds.org/docs/source-date-epoch/

Differential Revision: https://reviews.llvm.org/D108332

(cherry picked from commit 46c947af7ead0a939fbd7a93c370e7ead2128d07)

Added: 


Modified: 
llvm/utils/lit/lit/TestingConfig.py

Removed: 




diff  --git a/llvm/utils/lit/lit/TestingConfig.py 
b/llvm/utils/lit/lit/TestingConfig.py
index d534d895e4bae..37558bd5059c0 100644
--- a/llvm/utils/lit/lit/TestingConfig.py
+++ b/llvm/utils/lit/lit/TestingConfig.py
@@ -28,7 +28,7 @@ def fromdefaults(litConfig):
  'TMPDIR', 'TMP', 'TEMP', 'TEMPDIR', 'AVRLIT_BOARD',
  'AVRLIT_PORT', 'FILECHECK_OPTS', 'VCINSTALLDIR',
  'VCToolsinstallDir', 'VSINSTALLDIR', 'WindowsSdkDir',
- 'WindowsSDKLibVersion']
+ 'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH']
 
 if sys.platform == 'win32':
 pass_vars.append('INCLUDE')



___
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits


[llvm-branch-commits] [llvm] eaeb7dc - ARM: make FastISel & GISel pass -1 to ADJCALLSTACKUP to signal no callee pop.

2022-01-11 Thread Tom Stellard via llvm-branch-commits

Author: Tim Northover
Date: 2022-01-11T21:08:21-08:00
New Revision: eaeb7dcf32495155b5d430d2a650a211c9b230af

URL: 
https://github.com/llvm/llvm-project/commit/eaeb7dcf32495155b5d430d2a650a211c9b230af
DIFF: 
https://github.com/llvm/llvm-project/commit/eaeb7dcf32495155b5d430d2a650a211c9b230af.diff

LOG: ARM: make FastISel & GISel pass -1 to ADJCALLSTACKUP to signal no callee 
pop.

The interface for these instructions changed with support for mandatory tail
calls, and now -1 indicates the CalleePopAmount argument is not valid.
Unfortunately I didn't realise FastISel or GISel did calls at the time so
didn't update them.

(cherry picked from commit 0b5b35fdbdbf029bb6915e183541556c4eeadd3f)

Added: 
llvm/test/CodeGen/ARM/fast-call-frame-restore.ll

Modified: 
llvm/lib/Target/ARM/ARMCallLowering.cpp
llvm/lib/Target/ARM/ARMFastISel.cpp
llvm/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
llvm/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
llvm/test/CodeGen/ARM/GlobalISel/irtranslator-varargs-lowering.ll

Removed: 




diff  --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp 
b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index aff7ec8d2ed63..256a95b94f6c3 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -525,7 +525,7 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder 
&MIRBuilder, CallLoweringInfo &
 
   MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
   .addImm(ArgAssigner.StackOffset)
-  .addImm(0)
+  .addImm(-1ULL)
   .add(predOps(ARMCC::AL));
 
   return true;

diff  --git a/llvm/lib/Target/ARM/ARMFastISel.cpp 
b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 28a076edd6dcd..9224c2221f4d7 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -2022,7 +2022,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, 
SmallVectorImpl<Register> &UsedRegs,
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
   TII.get(AdjStackUp))
-  .addImm(NumBytes).addImm(0));
+  .addImm(NumBytes).addImm(-1ULL));
 
   // Now the return value.
   if (RetVT != MVT::isVoid) {

diff  --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll 
b/llvm/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
index ed4be25df14ea..b50460a647530 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
@@ -11,7 +11,7 @@ define arm_aapcscc void @test_indirect_call(void() *%fptr) {
   ; NOV4T:   [[COPY:%[0-9]+]]:tgpr(p0) = COPY $r0
   ; NOV4T:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; NOV4T:   BMOVPCRX_CALL [[COPY]](p0), csr_aapcs, implicit-def $lr, implicit 
$sp
-  ; NOV4T:   ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
+  ; NOV4T:   ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; NOV4T:   MOVPCLR 14 /* CC::al */, $noreg
   ; V4T-LABEL: name: test_indirect_call
   ; V4T: bb.1.entry:
@@ -19,7 +19,7 @@ define arm_aapcscc void @test_indirect_call(void() *%fptr) {
   ; V4T:   [[COPY:%[0-9]+]]:tgpr(p0) = COPY $r0
   ; V4T:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; V4T:   BX_CALL [[COPY]](p0), csr_aapcs, implicit-def $lr, implicit $sp
-  ; V4T:   ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
+  ; V4T:   ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; V4T:   BX_RET 14 /* CC::al */, $noreg
   ; V5T-LABEL: name: test_indirect_call
   ; V5T: bb.1.entry:
@@ -27,7 +27,7 @@ define arm_aapcscc void @test_indirect_call(void() *%fptr) {
   ; V5T:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $r0
   ; V5T:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; V5T:   BLX [[COPY]](p0), csr_aapcs, implicit-def $lr, implicit $sp
-  ; V5T:   ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
+  ; V5T:   ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; V5T:   BX_RET 14 /* CC::al */, $noreg
   ; THUMB-LABEL: name: test_indirect_call
   ; THUMB: bb.1.entry:
@@ -35,7 +35,7 @@ define arm_aapcscc void @test_indirect_call(void() *%fptr) {
   ; THUMB:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $r0
   ; THUMB:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; THUMB:   tBLXr 14 /* CC::al */, $noreg, [[COPY]](p0), csr_aapcs, 
implicit-def $lr, implicit $sp
-  ; THUMB:   ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
+  ; THUMB:   ADJCALLSTACKUP 0, -1, 14 /* CC::al */, $noreg, implicit-def $sp, 
implicit $sp
   ; THUMB:   tBX_RET 14 /* CC::al */, $noreg
 entry:
   notail call arm_aapcscc void %fptr()
@@ -49,25 +49,25 @@ define arm_aapcscc voi

[llvm-branch-commits] [llvm] 52a400d - [InlineCost] model calls to llvm.is.constant* more carefully

2022-01-11 Thread Tom Stellard via llvm-branch-commits

Author: Nick Desaulniers
Date: 2022-01-11T21:12:29-08:00
New Revision: 52a400d8e4c46876fae5d732c92df254a9ceae8f

URL: 
https://github.com/llvm/llvm-project/commit/52a400d8e4c46876fae5d732c92df254a9ceae8f
DIFF: 
https://github.com/llvm/llvm-project/commit/52a400d8e4c46876fae5d732c92df254a9ceae8f.diff

LOG: [InlineCost] model calls to llvm.is.constant* more carefully

llvm.is.constant* intrinsics are evaluated to 0 or 1 integral values.

A common use case for llvm.is.constant comes from the higher level
__builtin_constant_p. A common usage pattern of __builtin_constant_p in
the Linux kernel is:

void foo (int bar) {
  if (__builtin_constant_p(bar)) {
// lots of code that will fold away to a constant.
  } else {
// a little bit of code, usually a libcall.
  }
}

A minor issue in InlineCost calculations is when `bar` is _not_ Constant
and still will not be after inlining, we don't discount the true branch
and the inline cost of `foo` ends up being the cost of both branches
together, rather than just the false branch.

This leads to code like the above where inlining will not help prove bar
Constant, but it still would be beneficial to inline foo, because the
"true" branch is irrelevant from a cost perspective.

For example, IPSCCP can sink a passed constant argument to foo:

const int x = 42;
void bar (void) { foo(x); }

This improves our inlining decisions, and fixes a few head scratching
cases were the disassembly shows a relatively small `foo` not inlined
into a lone caller.

We could further improve this modeling by tracking whether the argument
to llvm.is.constant* is a parameter of the function, and if inlining
would allow that parameter to become Constant. This idea is noted in a
FIXME comment.

Link: https://github.com/ClangBuiltLinux/linux/issues/1302

Reviewed By: kazu

Differential Revision: https://reviews.llvm.org/D111272

(cherry picked from commit 9697f93587f46300814f1c6c68af347441d6e05d)

Added: 
llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll

Modified: 
llvm/lib/Analysis/InlineCost.cpp

Removed: 




diff  --git a/llvm/lib/Analysis/InlineCost.cpp 
b/llvm/lib/Analysis/InlineCost.cpp
index 4c2413e14435e..e8f79a28a8e82 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -354,6 +354,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool>
{
   bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
   bool simplifyInstruction(Instruction &I, Callable Evaluate);
+  bool simplifyIntrinsicCallIsConstant(CallBase &CB);
   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
 
   /// Return true if the given argument to the function being considered for
@@ -1471,6 +1472,27 @@ bool CallAnalyzer::simplifyInstruction(Instruction &I, 
Callable Evaluate) {
   return true;
 }
 
+/// Try to simplify a call to llvm.is.constant.
+///
+/// Duplicate the argument checking from CallAnalyzer::simplifyCallSite since
+/// we expect calls of this specific intrinsic to be infrequent.
+///
+/// FIXME: Given that we know CB's parent (F) caller
+/// (CandidateCall->getParent()->getParent()), we might be able to determine
+/// whether inlining F into F's caller would change how the call to
+/// llvm.is.constant would evaluate.
+bool CallAnalyzer::simplifyIntrinsicCallIsConstant(CallBase &CB) {
+  Value *Arg = CB.getArgOperand(0);
+  auto *C = dyn_cast<ConstantInt>(Arg);
+
+  if (!C)
+C = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Arg));
+
+  Type *RT = CB.getFunctionType()->getReturnType();
+  SimplifiedValues[&CB] = ConstantInt::get(RT, C ? 1 : 0);
+  return true;
+}
+
 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
   // Propagate constants through bitcasts.
   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
@@ -2091,6 +2113,8 @@ bool CallAnalyzer::visitCallBase(CallBase &Call) {
   if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0)))
 SROAArgValues[II] = SROAArg;
   return true;
+case Intrinsic::is_constant:
+  return simplifyIntrinsicCallIsConstant(Call);
 }
   }
 

diff  --git a/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll 
b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll
new file mode 100644
index 0..3c96267a3fd5a
--- /dev/null
+++ b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll
@@ -0,0 +1,39 @@
+; RUN: opt %s -passes=inline -inline-threshold=20 -S | FileCheck %s
+
+declare i1 @llvm.is.constant.i64(i64)
+declare void @foo()
+
+define void @callee(i64 %val) {
+  %cond = call i1 @llvm.is.constant.i64(i64 %val)
+  br i1 %cond, label %cond.true, label %cond.false
+
+cond.true:
+; Rack up costs with a couple of function calls so that this function
+; gets inlined only when @llvm.is.constant.i64 is folded.  In reality,
+; the "then" clause of __builtin_constant_p tends to have statements
+; that fold very well, so the cost of the "then" clause is 

[llvm-branch-commits] [lld] 9d9efb1 - [lld][CMake] Add LLD_DEFAULT_NOSTART_STOP_GC

2022-01-11 Thread Tom Stellard via llvm-branch-commits

Author: Fangrui Song
Date: 2022-01-11T22:01:00-08:00
New Revision: 9d9efb1f67ff70e996b1cb7fa00e24b9121be226

URL: 
https://github.com/llvm/llvm-project/commit/9d9efb1f67ff70e996b1cb7fa00e24b9121be226
DIFF: 
https://github.com/llvm/llvm-project/commit/9d9efb1f67ff70e996b1cb7fa00e24b9121be226.diff

LOG: [lld][CMake] Add LLD_DEFAULT_NOSTART_STOP_GC

This option is for groups who need time to accommodate the ld.lld -z
start-stop-gc default.

This is a modified version of https://reviews.llvm.org/D114186 that
enables this option by default.

Added: 


Modified: 
lld/CMakeLists.txt
lld/ELF/Driver.cpp
lld/test/CMakeLists.txt
lld/test/ELF/gc-sections-metadata-startstop.s
lld/test/ELF/gc-sections-startstop-hint.s
lld/test/ELF/gc-sections-startstop.s
lld/test/lit.cfg.py
lld/test/lit.site.cfg.py.in

Removed: 




diff  --git a/lld/CMakeLists.txt b/lld/CMakeLists.txt
index 2e99564f4e3e..2e8b502a2855 100644
--- a/lld/CMakeLists.txt
+++ b/lld/CMakeLists.txt
@@ -176,6 +176,15 @@ if (LLD_DEFAULT_LD_LLD_IS_MINGW)
   add_definitions("-DLLD_DEFAULT_LD_LLD_IS_MINGW=1")
 endif()
 
+option(LLD_DEFAULT_NOSTART_STOP_GC
+  "Default ld.lld to -z nostart-stop-gc. If ON, C identifier name sections are
+  forced retained by __start_/__stop_ references. This may increase output size
+  for many instrumentations, but is compatible with GNU ld newer than 2015-10"
+  ON)
+if (LLD_DEFAULT_NOSTART_STOP_GC)
+  add_definitions("-DLLD_DEFAULT_NOSTART_STOP_GC=1")
+endif()
+
 if (MSVC)
   add_definitions(-wd4530) # Suppress 'warning C4530: C++ exception handler 
used, but unwind semantics are not enabled.'
   add_definitions(-wd4062) # Suppress 'warning C4062: enumerator X in switch 
of enum Y is not handled' from system header.

diff  --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 594c20016827..a4187eeda15c 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -1193,8 +1193,17 @@ static void readConfigs(opt::InputArgList &args) {
   config->zSeparate = getZSeparate(args);
   config->zShstk = hasZOption(args, "shstk");
   config->zStackSize = args::getZOptionValue(args, OPT_z, "stack-size", 0);
+#ifdef LLD_DEFAULT_NOSTART_STOP_GC
+  // -z start-stop-gc default matches GNU ld<2015-10 and ld64 section$start
+  // symbols and can decrease file size for many instrumentations.  However,
+  // some users need time to accommodate the -z nostart-stop-gc default, so 
this
+  // is added as a temporary workaround.
+  config->zStartStopGC =
+  getZFlag(args, "start-stop-gc", "nostart-stop-gc", false);
+#else
   config->zStartStopGC =
   getZFlag(args, "start-stop-gc", "nostart-stop-gc", true);
+#endif
   config->zStartStopVisibility = getZStartStopVisibility(args);
   config->zText = getZFlag(args, "text", "notext", true);
   config->zWxneeded = hasZOption(args, "wxneeded");

diff  --git a/lld/test/CMakeLists.txt b/lld/test/CMakeLists.txt
index 0c42427e006b..f0a5493c4c3f 100644
--- a/lld/test/CMakeLists.txt
+++ b/lld/test/CMakeLists.txt
@@ -11,6 +11,7 @@ llvm_canonicalize_cmake_booleans(
   LLVM_ENABLE_ZLIB
   LLVM_ENABLE_LIBXML2
   LLD_DEFAULT_LD_LLD_IS_MINGW
+  LLD_DEFAULT_NOSTART_STOP_GC
   LLVM_HAVE_LIBXAR
   )
 

diff  --git a/lld/test/ELF/gc-sections-metadata-startstop.s 
b/lld/test/ELF/gc-sections-metadata-startstop.s
index 76134525b4ce..d77de240957c 100644
--- a/lld/test/ELF/gc-sections-metadata-startstop.s
+++ b/lld/test/ELF/gc-sections-metadata-startstop.s
@@ -1,4 +1,5 @@
 # REQUIRES: x86
+# UNSUPPORTED: default-nostart-stop-gc
 # LINK_ORDER cnamed sections are not kept alive by the __start_* reference.
 
 # RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux %s -o %t.o

diff  --git a/lld/test/ELF/gc-sections-startstop-hint.s 
b/lld/test/ELF/gc-sections-startstop-hint.s
index 33d088fa3af7..4069d1cb4bcb 100644
--- a/lld/test/ELF/gc-sections-startstop-hint.s
+++ b/lld/test/ELF/gc-sections-startstop-hint.s
@@ -1,4 +1,5 @@
 # REQUIRES: x86
+# UNSUPPORTED: default-nostart-stop-gc
 ## Some projects may not work with GNU ld<2015-10 (ld.lld 13.0.0) 
--gc-sections behavior.
 ## Give a hint.
 

diff  --git a/lld/test/ELF/gc-sections-startstop.s 
b/lld/test/ELF/gc-sections-startstop.s
index 569cd3cdd10f..6f327a8c37dd 100644
--- a/lld/test/ELF/gc-sections-startstop.s
+++ b/lld/test/ELF/gc-sections-startstop.s
@@ -1,6 +1,7 @@
 ## Check that group members are retained or discarded as a unit.
 
 # REQUIRES: x86
+# UNSUPPORTED: default-nostart-stop-gc
 
 # RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t.o
 # RUN: ld.lld %t.o --gc-sections -o %t

diff  --git a/lld/test/lit.cfg.py b/lld/test/lit.cfg.py
index 225104243bf2..c23a6467fc42 100644
--- a/lld/test/lit.cfg.py
+++ b/lld/test/lit.cfg.py
@@ -118,3 +118,6 @@
 # ELF tests expect the default target for ld.lld to be ELF.
 if config.ld_lld_default_mingw:
 config.excludes.append('ELF')
+
+if config.ld_lld_default_nostart_stop_gc:
+config.available_featu