[x86_64][windows][swift] do not use Swift async extended frame for windows x86_64 (#80468)
targets that use windows 64 prologue
Windows x86_64 stack frame layout is currently not compatible with
Swift's async extended frame, which reserves the slot right below RBP
(RBP-8) for the async context pointer, as it doesn't account for the
fact that a stack object in a win64 frame can be allocated at the same
location. This can cause issues at runtime, for instance, Swift's TCA
test code has functions that fail because of this issue, as they spill a
value to that stack slot, which then gets overwritten by a store into the
address returned by the @llvm.swift.async.context.addr() intrinsic (that
ends up being RBP - 8), leading to an incorrect value being used at a
later point when that stack slot is being read from again. This change
drops the use of async extended frame for windows x86_64 subtargets and
instead uses the 32-bit approach of allocating a separate stack slot
for the stored async context pointer.
Additionally, LLDB which is the primary consumer of the extended frame
makes assumptions like checking for a saved previous frame pointer at
the current frame pointer address, which is also incompatible with the
windows x86_64 frame layout, as the previous frame pointer is not
guaranteed to be stored at the current frame pointer address. Therefore
the extended frame layout can be turned off to fix the current
miscompile without introducing regression into LLDB for windows x86_64
as it already doesn't work correctly. I am still investigating what
should be done for LLDB to support using an allocated stack slot to
store the async frame context instead of being located at RBP - 8 for
windows.
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index fc2d4fb..be416fb 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1605,6 +1605,9 @@
[[fallthrough]];
case SwiftAsyncFramePointerMode::Always:
+ assert(
+ !IsWin64Prologue &&
+ "win64 prologue does not set the bit 60 in the saved frame pointer");
BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
.addUse(MachineFramePtr)
.addImm(60)
@@ -1747,6 +1750,8 @@
if (!IsFunclet) {
if (X86FI->hasSwiftAsyncContext()) {
+ assert(!IsWin64Prologue &&
+ "win64 prologue does not store async context right below rbp");
const auto &Attrs = MF.getFunction().getAttributes();
// Before we update the live frame pointer we have to ensure there's a
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6dc3df2..9a9b068 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26566,6 +26566,15 @@
return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
}
+bool X86::isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
+ const MachineFunction &MF) {
+ if (!Subtarget.is64Bit())
+ return false;
+ // 64-bit targets support extended Swift async frame setup,
+ // except for targets that use the windows 64 prologue.
+ return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+}
+
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
unsigned IntNo = Op.getConstantOperandVal(1);
@@ -26577,7 +26586,7 @@
SDLoc dl(Op);
auto &MF = DAG.getMachineFunction();
auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
- if (Subtarget.is64Bit()) {
+ if (X86::isExtendedSwiftAsyncFrameSupported(Subtarget, MF)) {
MF.getFrameInfo().setFrameAddressIsTaken(true);
X86FI->setHasSwiftAsyncContext(true);
SDValue Chain = Op->getOperand(0);
@@ -26590,13 +26599,15 @@
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
CopyRBP.getValue(1));
} else {
- // 32-bit so no special extended frame, create or reuse an existing
- // stack slot.
+ // No special extended frame, create or reuse an existing stack slot.
+ int PtrSize = Subtarget.is64Bit() ? 8 : 4;
if (!X86FI->getSwiftAsyncContextFrameIdx())
X86FI->setSwiftAsyncContextFrameIdx(
- MF.getFrameInfo().CreateStackObject(4, Align(4), false));
+ MF.getFrameInfo().CreateStackObject(PtrSize, Align(PtrSize),
+ false));
SDValue Result =
- DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
+ DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(),
+ PtrSize == 8 ? MVT::i64 : MVT::i32);
// Return { result, chain }.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
Op->getOperand(0));
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 3274540..f93c547 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -966,6 +966,11 @@
/// Check if Op is an operation that could be folded into a zero extend x86
/// instruction.
bool mayFoldIntoZeroExtend(SDValue Op);
+
+ /// True if the target supports the extended frame for async Swift
+ /// functions.
+ bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
+ const MachineFunction &MF);
} // end namespace X86
//===--------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index d75bd41..8c9bc75 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -1813,14 +1813,17 @@
for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
if (Ins[I].Flags.isSwiftAsync()) {
auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
- if (Subtarget.is64Bit())
+ if (X86::isExtendedSwiftAsyncFrameSupported(Subtarget, MF))
X86FI->setHasSwiftAsyncContext(true);
else {
- int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
+ int PtrSize = Subtarget.is64Bit() ? 8 : 4;
+ int FI =
+ MF.getFrameInfo().CreateStackObject(PtrSize, Align(PtrSize), false);
X86FI->setSwiftAsyncContextFrameIdx(FI);
- SDValue St = DAG.getStore(DAG.getEntryNode(), dl, InVals[I],
- DAG.getFrameIndex(FI, MVT::i32),
- MachinePointerInfo::getFixedStack(MF, FI));
+ SDValue St = DAG.getStore(
+ DAG.getEntryNode(), dl, InVals[I],
+ DAG.getFrameIndex(FI, PtrSize == 8 ? MVT::i64 : MVT::i32),
+ MachinePointerInfo::getFixedStack(MF, FI));
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
}
}
diff --git a/llvm/test/CodeGen/X86/swift-async-win64.ll b/llvm/test/CodeGen/X86/swift-async-win64.ll
index 8f30eb6..843118b 100644
--- a/llvm/test/CodeGen/X86/swift-async-win64.ll
+++ b/llvm/test/CodeGen/X86/swift-async-win64.ll
@@ -6,14 +6,12 @@
}
; CHECK64-LABEL: simple:
-; CHECK64: btsq $60, %rbp
; CHECK64: pushq %rbp
-; CHECK64: pushq %r14
-; CHECK64: leaq 8(%rsp), %rbp
-; [...]
-; CHECK64: addq $16, %rsp
+; CHECK64: pushq %rax
+; CHECK64: movq %rsp, %rbp
+; CHECK64: movq %r14, (%rbp)
+; CHECK64: addq $8, %rsp
; CHECK64: popq %rbp
-; CHECK64: btrq $60, %rbp
; CHECK64: retq
; CHECK32-LABEL: simple:
@@ -26,20 +24,20 @@
}
; CHECK64-LABEL: more_csrs:
-; CHECK64: btsq $60, %rbp
; CHECK64: pushq %rbp
; CHECK64: .seh_pushreg %rbp
-; CHECK64: pushq %r14
-; CHECK64: .seh_pushreg %r14
-; CHECK64: leaq 8(%rsp), %rbp
-; CHECK64: subq $8, %rsp
; CHECK64: pushq %r15
; CHECK64: .seh_pushreg %r15
+; CHECK64: pushq %rax
+; CHECK64: .seh_stackalloc 8
+; CHECK64: movq %rsp, %rbp
+; CHECK64: .seh_setframe %rbp, 0
+; CHECK64: .seh_endprologue
+; CHECK64: movq %r14, (%rbp)
; [...]
+; CHECK64: addq $8, %rsp
; CHECK64: popq %r15
-; CHECK64: addq $16, %rsp
; CHECK64: popq %rbp
-; CHECK64: btrq $60, %rbp
; CHECK64: retq
declare void @f(ptr)
@@ -51,21 +49,16 @@
}
; CHECK64-LABEL: locals:
-; CHECK64: btsq $60, %rbp
; CHECK64: pushq %rbp
; CHECK64: .seh_pushreg %rbp
-; CHECK64: pushq %r14
-; CHECK64: .seh_pushreg %r14
-; CHECK64: leaq 8(%rsp), %rbp
-; CHECK64: subq $88, %rsp
+; CHECK64: subq $80, %rsp
+; CHECK64: movq %r14, -8(%rbp)
; CHECK64: leaq -48(%rbp), %rcx
; CHECK64: callq f
; CHECK64: addq $80, %rsp
-; CHECK64: addq $16, %rsp
; CHECK64: popq %rbp
-; CHECK64: btrq $60, %rbp
; CHECK64: retq
define void @use_input_context(ptr swiftasync %context, ptr %ptr) "frame-pointer"="all" {
@@ -84,7 +77,7 @@
}
; CHECK64-LABEL: context_in_func:
-; CHECK64: leaq -8(%rbp), %rax
+; CHECK64: movq %rsp, %rax
; CHECK32-LABEL: context_in_func:
; CHECK32: movl %esp, %eax
@@ -96,9 +89,7 @@
}
; CHECK64-LABEL: write_frame_context:
-; CHECK64: movq %rbp, [[TMP:%.*]]
-; CHECK64: subq $8, [[TMP]]
-; CHECK64: movq %rcx, ([[TMP]])
+; CHECK64: movq %rcx, (%rsp)
define void @simple_fp_elim(ptr swiftasync %context) "frame-pointer"="non-leaf" {
ret void
@@ -106,3 +97,25 @@
; CHECK64-LABEL: simple_fp_elim:
; CHECK64-NOT: btsq
+
+define void @manylocals_and_overwritten_context(ptr swiftasync %context, ptr %new_context) "frame-pointer"="all" {
+ %ptr = call ptr @llvm.swift.async.context.addr()
+ store ptr %new_context, ptr %ptr
+ %var1 = alloca i64, i64 1
+ call void @f(ptr %var1)
+ %var2 = alloca i64, i64 16
+ call void @f(ptr %var2)
+ %ptr2 = call ptr @llvm.swift.async.context.addr()
+ store ptr %new_context, ptr %ptr2
+ ret void
+}
+
+; CHECK64-LABEL: manylocals_and_overwritten_context:
+; CHECK64: pushq %rbp
+; CHECK64: subq $184, %rsp
+; CHECK64: leaq 128(%rsp), %rbp
+; CHECK64: movq %rcx, %rsi
+; CHECK64: movq %rcx, 48(%rbp)
+; CHECK64: callq f
+; CHECK64: callq f
+; CHECK64: movq %rsi, 48(%rbp)