[StackProtector] Handle atomicrmw xchg in HasAddressTaken heuristic
Atomicrmw xchg can take a pointer as its value operand, so we should
treat it similarly to store or cmpxchg.
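For example (illustrative IR, mirroring the test added below), the
address of a local escapes through the value operand:

  %a = alloca i64
  atomicrmw xchg ptr %p, ptr %a acquire  ; stores the address of %a to *%p
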
I believe that all targets that support stack protectors convert this
to an integer atomicrmw xchg in AtomicExpand, so this is not an issue
in practice. We should still handle it correctly in case that
conversion does not happen.
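As a rough sketch (not necessarily the exact AtomicExpand output, and
with illustrative value names), that conversion would look something
like:

  %val = ptrtoint ptr %a to i64
  %old = atomicrmw xchg ptr %p, i64 %val acquire
  %res = inttoptr i64 %old to ptr
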
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 0ce305c..232e84fb 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -275,6 +275,10 @@
if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
return true;
break;
+ case Instruction::AtomicRMW:
+ if (AI == cast<AtomicRMWInst>(I)->getValOperand())
+ return true;
+ break;
case Instruction::PtrToInt:
if (AI == cast<PtrToIntInst>(I)->getOperand(0))
return true;
@@ -327,13 +331,9 @@
break;
}
case Instruction::Load:
- case Instruction::AtomicRMW:
case Instruction::Ret:
// These instructions take an address operand, but have load-like or
// other innocuous behavior that should not trigger a stack protector.
- // atomicrmw conceptually has both load and store semantics, but the
- // value being stored must be integer; so if a pointer is being stored,
- // we'll catch it in the PtrToInt case above.
break;
default:
// Conservatively return true for any instruction that takes an address
diff --git a/llvm/test/CodeGen/X86/stack-protector-atomicrmw-xchg.ll b/llvm/test/CodeGen/X86/stack-protector-atomicrmw-xchg.ll
index a67643f..ed631fa 100644
--- a/llvm/test/CodeGen/X86/stack-protector-atomicrmw-xchg.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-atomicrmw-xchg.ll
@@ -4,11 +4,25 @@
define void @atomicrmw_xchg(ptr %p) sspstrong {
; CHECK-LABEL: define void @atomicrmw_xchg(
; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257)), align 8
+; CHECK-NEXT: call void @llvm.stackprotector(ptr [[STACKGUARD]], ptr [[STACKGUARDSLOT]])
; CHECK-NEXT: [[A:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[P]], ptr [[A]] acquire, align 8
+; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile ptr, ptr addrspace(257) inttoptr (i32 40 to ptr addrspace(257)), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load volatile ptr, ptr [[STACKGUARDSLOT]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq ptr [[STACKGUARD1]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[SP_RETURN:.*]], label %[[CALLSTACKCHECKFAILBLK:.*]], !prof [[PROF0:![0-9]+]]
+; CHECK: [[SP_RETURN]]:
; CHECK-NEXT: ret void
+; CHECK: [[CALLSTACKCHECKFAILBLK]]:
+; CHECK-NEXT: call void @__stack_chk_fail()
+; CHECK-NEXT: unreachable
;
%a = alloca i64
atomicrmw xchg ptr %p, ptr %a acquire
ret void
}
+;.
+; CHECK: [[PROF0]] = !{!"branch_weights", i32 2147481600, i32 2048}
+;.