[LegalizeTypes] Expand FNEG to bitwise op for IEEE FP types

Summary:
Except for the custom floating-point types x86_fp80 and ppc_fp128,
expand Y = FNEG(X) to Y = X ^ sign mask to avoid a library call.
Using a bitwise operation can improve code size and performance.

Reviewers: efriedma

Reviewed By: efriedma

Subscribers: efriedma, kpn, arsenm, eli.friedman, javed.absar, rbar, johnrusso, simoncook, sabuasal, niosHD, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, asb, llvm-commits

Differential Revision: https://reviews.llvm.org/D57875

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@353757 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 17dee8d..ae00d69 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -440,6 +440,15 @@
     return SDValue(N, ResNo);
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   SDLoc dl(N);
+
+  EVT FloatVT = N->getValueType(ResNo);
+  if (FloatVT == MVT::f32 || FloatVT == MVT::f64 || FloatVT == MVT::f128) {
+    // Expand Y = FNEG(X) -> Y = X ^ sign mask
+    APInt SignMask = APInt::getSignMask(NVT.getSizeInBits());
+    return DAG.getNode(ISD::XOR, dl, NVT, GetSoftenedFloat(N->getOperand(0)),
+                       DAG.getConstant(SignMask, dl, NVT));
+  }
+
   // Expand Y = FNEG(X) -> Y = SUB -0.0, X
   SDValue Ops[2] = { DAG.getConstantFP(-0.0, dl, N->getValueType(0)),
                      GetSoftenedFloat(N->getOperand(0)) };
diff --git a/test/CodeGen/ARM/legalize-fneg.ll b/test/CodeGen/ARM/legalize-fneg.ll
new file mode 100644
index 0000000..f7578ae
--- /dev/null
+++ b/test/CodeGen/ARM/legalize-fneg.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-eabi -float-abi=soft  -verify-machineinstrs < %s \
+; RUN:   | FileCheck --check-prefixes=ARM %s
+; RUN: llc -mtriple=arm-eabi -float-abi=soft  -verify-machineinstrs < %s \
+; RUN:   | FileCheck --check-prefixes=NOLIB %s
+
+; Check Y = FNEG(X) -> Y = X ^ sign mask and no lib call is generated.
+define void @test1(float* %a, float* %b) {
+; ARM-LABEL: test1:
+; ARM:       @ %bb.0: @ %entry
+; ARM-NEXT:    ldr r1, [r1]
+; ARM-NEXT:    eor r1, r1, #-2147483648
+; ARM-NEXT:    str r1, [r0]
+; ARM-NEXT:    mov pc, lr
+; NOLIB-LABEL:  test1:
+; NOLIB:       eor
+; NOLIB-NOT:   bl __aeabi_fsub
+entry:
+  %0 = load float, float* %b
+  %neg = fneg float %0
+  store float %neg, float* %a
+  ret void
+}
+
+define void @test2(double* %a, double* %b) {
+; ARM-LABEL: test2:
+; ARM:       @ %bb.0: @ %entry
+; ARM-NEXT:    ldr r2, [r1]
+; ARM-NEXT:    ldr r1, [r1, #4]
+; ARM-NEXT:    str r2, [r0]
+; ARM-NEXT:    eor r1, r1, #-2147483648
+; ARM-NEXT:    str r1, [r0, #4]
+; ARM-NEXT:    mov pc, lr
+; NOLIB-LABEL:  test2:
+; NOLIB:       eor
+; NOLIB-NOT:   bl __aeabi_dsub
+entry:
+  %0 = load double, double* %b
+  %neg = fneg double %0
+  store double %neg, double* %a
+  ret void
+}
+
+define void @test3(fp128* %a, fp128* %b) {
+; ARM-LABEL: test3:
+; ARM:       @ %bb.0: @ %entry
+; ARM-NEXT:    ldm r1, {r2, r3, r12}
+; ARM-NEXT:    ldr r1, [r1, #12]
+; ARM-NEXT:    stm r0, {r2, r3, r12}
+; ARM-NEXT:    eor r1, r1, #-2147483648
+; ARM-NEXT:    str r1, [r0, #12]
+; ARM-NEXT:    mov pc, lr
+; NOLIB-LABEL: test3:
+; NOLIB:       eor
+; NOLIB-NOT:   bl __subtf3
+entry:
+  %0 = load fp128, fp128* %b
+  %neg = fneg fp128 %0
+  store fp128 %neg, fp128* %a
+  ret void
+}
diff --git a/test/CodeGen/RISCV/legalize-fneg.ll b/test/CodeGen/RISCV/legalize-fneg.ll
new file mode 100644
index 0000000..a38daaa
--- /dev/null
+++ b/test/CodeGen/RISCV/legalize-fneg.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64 %s
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=NOLIB %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=NOLIB %s
+
+define void @test1(float* %a, float* %b) {
+; RV32-LABEL: test1:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    lui a2, 524288
+; RV32-NEXT:    xor a1, a1, a2
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test1:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a2, zero, 1
+; RV64-NEXT:    slli a2, a2, 31
+; RV64-NEXT:    lw a1, 0(a1)
+; RV64-NEXT:    xor a1, a1, a2
+; RV64-NEXT:    sw a1, 0(a0)
+; RV64-NEXT:    ret
+
+; NOLIB-LABEL: test1:
+; NOLIB:         xor
+; NOLIB-NOT:     call __subsf3
+entry:
+  %0 = load float, float* %b
+  %neg = fneg float %0
+  store float %neg, float* %a
+  ret void
+}
+
+define void @test2(double* %a, double* %b) {
+; RV32-LABEL: test2:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lw a2, 4(a1)
+; RV32-NEXT:    lw a1, 0(a1)
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    lui a1, 524288
+; RV32-NEXT:    xor a1, a2, a1
+; RV32-NEXT:    sw a1, 4(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test2:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a2, zero, -1
+; RV64-NEXT:    slli a2, a2, 63
+; RV64-NEXT:    ld a1, 0(a1)
+; RV64-NEXT:    xor a1, a1, a2
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    ret
+
+; NOLIB-LABEL: test2:
+; NOLIB:         xor
+; NOLIB-NOT:     call __subdf3
+entry:
+  %0 = load double, double* %b
+  %neg = fneg double %0
+  store double %neg, double* %a
+  ret void
+}
+
+define void @test3(fp128* %a, fp128* %b) {
+; RV32-LABEL: test3:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lw a2, 12(a1)
+; RV32-NEXT:    lw a3, 0(a1)
+; RV32-NEXT:    lw a4, 4(a1)
+; RV32-NEXT:    lw a1, 8(a1)
+; RV32-NEXT:    sw a1, 8(a0)
+; RV32-NEXT:    sw a4, 4(a0)
+; RV32-NEXT:    sw a3, 0(a0)
+; RV32-NEXT:    lui a1, 524288
+; RV32-NEXT:    xor a1, a2, a1
+; RV32-NEXT:    sw a1, 12(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test3:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    ld a2, 8(a1)
+; RV64-NEXT:    ld a1, 0(a1)
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    addi a1, zero, -1
+; RV64-NEXT:    slli a1, a1, 63
+; RV64-NEXT:    xor a1, a2, a1
+; RV64-NEXT:    sd a1, 8(a0)
+; RV64-NEXT:    ret
+
+; NOLIB-LABEL: test3:
+; NOLIB:         xor
+; NOLIB-NOT:     call __subtf3
+entry:
+  %0 = load fp128, fp128* %b
+  %neg = fneg fp128 %0
+  store fp128 %neg, fp128* %a
+  ret void
+}
+
+