[X86] matchAddressRecursively - add XOR(X, MIN_SIGNED_VALUE) handling

Allows us to fold XOR(X, MIN_SIGNED_VALUE), which is equivalent to ADD(X, MIN_SIGNED_VALUE), into LEA addressing patterns.
As mentioned on PR52267.
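The fold is sound because MIN_SIGNED_VALUE is just the sign bit: adding it either sets bit 31 (the lower bits of the addend are zero, so no carries occur below it) or clears it (the carry out of the top bit is discarded), which is exactly what XOR does. A minimal standalone C++ sketch of the i32 identity (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t SignBit = 0x80000000u; // bit pattern of i32 MIN_SIGNED_VALUE
      for (uint32_t X : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu})
        // Adding the sign bit can only flip bit 31 (any carry out of bit 31
        // is discarded in 32-bit arithmetic), so ADD and XOR coincide here.
        assert((X ^ SignBit) == X + SignBit);
      return 0;
    }

Treating the XOR as an ADD lets the constant be absorbed into an LEA displacement, e.g. a shift plus 'xor' becomes a single 'leal -2147483648(,%reg,8), %eax', as the updated xor-lea.ll checks below show.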
Differential Revision: https://reviews.llvm.org/D122815
GitOrigin-RevId: c64f37f818b7a928b48540920d6e0c6df8903128
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index aab534d..f7c365c 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2431,6 +2431,15 @@
return false;
break;

+  case ISD::XOR:
+ // We want to look through a transform in InstCombine that
+ // turns 'add' with min_signed_val into 'xor', so we can treat this 'xor'
+ // exactly like an 'add'.
+ if (auto *NC1 = dyn_cast<ConstantSDNode>(N.getOperand(1)))
+ if (NC1->isMinSignedValue() && !matchAdd(N, AM, Depth))
+ return false;
+ break;
+
case ISD::AND: {
// Perform some heroic transforms on an and of a constant-count shift
// with a constant to enable use of the scaled offset field.
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index f464b72..50626cf 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -840,11 +840,11 @@
// Define X86-specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
- [add, sub, mul, X86mul_imm, shl, or, frameindex],
+ [add, sub, mul, X86mul_imm, shl, or, xor, frameindex],
[]>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
- [add, sub, mul, X86mul_imm, shl, or,
+ [add, sub, mul, X86mul_imm, shl, or, xor,
frameindex, X86WrapperRIP],
[]>;

@@ -855,7 +855,7 @@

[tglobaltlsaddr], []>;
def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
- [add, sub, mul, X86mul_imm, shl, or, frameindex,
+ [add, sub, mul, X86mul_imm, shl, or, xor, frameindex,
X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
diff --git a/test/CodeGen/X86/xor-lea.ll b/test/CodeGen/X86/xor-lea.ll
index 9876854..a4d5920 100644
--- a/test/CodeGen/X86/xor-lea.ll
+++ b/test/CodeGen/X86/xor-lea.ll
@@ -257,8 +257,8 @@
; X64-LABEL: sub_xor_sminval_i32:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
-; X64-NEXT: xorl $-2147483648, %edi # imm = 0x80000000
-; X64-NEXT: leal -512(%rdi), %eax
+; X64-NEXT: movl $-512, %eax # imm = 0xFE00
+; X64-NEXT: leal -2147483648(%rdi,%rax), %eax
; X64-NEXT: retq
%r = xor i32 %x, 2147483648
%s = sub i32 %r, 512
@@ -335,15 +335,13 @@
; X86-LABEL: xor_shl_sminval_i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: shll $3, %eax
-; X86-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X86-NEXT: leal -2147483648(,%eax,8), %eax
; X86-NEXT: retl
;
; X64-LABEL: xor_shl_sminval_i32:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
-; X64-NEXT: leal (,%rdi,8), %eax
-; X64-NEXT: xorl $-2147483648, %eax # imm = 0x80000000
+; X64-NEXT: leal -2147483648(,%rdi,8), %eax
; X64-NEXT: retq
%s = shl i32 %x, 3
%r = xor i32 %s, 2147483648
@@ -382,9 +380,8 @@
;
; X64-LABEL: xor_shl_sminval_i64:
; X64: # %bb.0:
-; X64-NEXT: leaq (,%rdi,4), %rcx
; X64-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
-; X64-NEXT: xorq %rcx, %rax
+; X64-NEXT: leaq (%rax,%rdi,4), %rax
; X64-NEXT: retq
%s = shl i64 %x, 2
%r = xor i64 %s, -9223372036854775808