| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s |
| |
; FIXME: We should not generate ld/st for such register spill/fill, because
; this test case is very simple and the register pressure is not high. If the
; spill/fill algorithm is improved, this test case may no longer be triggered,
; and it can then be deleted.
; Spill/fill of an ld2 result (a D-register pair) that is live across a call:
; the CHECK lines show the pair being saved with st1 and restored with ld1
; around the call to @foo on the if.then path.
define i32 @spill.DPairReg(ptr %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DPairReg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ld2 { v0.2s, v1.2s }, [x0]
; CHECK-NEXT: cbz w1, .LBB0_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: mov w0, v0.s[1]
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB0_2: // %if.then
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1 { v0.2d, v1.2d }, [x8] // 32-byte Folded Spill
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: ld1 { v0.2d, v1.2d }, [x8] // 32-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: mov w0, v0.s[1]
; CHECK-NEXT: ret
entry:
; ld2 yields a two-vector struct; both vectors must survive until if.end.
%vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr %arg1)
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %if.then, label %if.end

if.then:
; The call forces the register pair to be spilled and reloaded.
tail call void @foo()
br label %if.end

if.end:
; Return lane 1 of the first vector of the pair.
%vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
%res = extractelement <2 x i32> %vld.extract, i32 1
ret i32 %res
}
| |
; Spill/fill of an ld3 result (a D-register triple) that is live across a
; call: st1/ld1 of three vectors around @foo on the if.then path.
define i16 @spill.DTripleReg(ptr %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DTripleReg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ld3 { v0.4h, v1.4h, v2.4h }, [x0]
; CHECK-NEXT: cbz w1, .LBB1_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: umov w0, v0.h[1]
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB1_2: // %if.then
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1 { v0.2d, v1.2d, v2.2d }, [x8] // 48-byte Folded Spill
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload
; CHECK-NEXT: ld1 { v0.2d, v1.2d, v2.2d }, [x8] // 48-byte Folded Reload
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: umov w0, v0.h[1]
; CHECK-NEXT: ret
entry:
; ld3 yields a three-vector struct that must survive until if.end.
%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %arg1)
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %if.then, label %if.end

if.then:
; The call forces the register triple to be spilled and reloaded.
tail call void @foo()
br label %if.end

if.end:
; Return lane 1 of the first vector of the triple.
%vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
%res = extractelement <4 x i16> %vld.extract, i32 1
ret i16 %res
}
| |
; Spill/fill of an ld4 result (a D-register quad) that is live across a
; call: st1/ld1 of four vectors around @foo on the if.then path.
define i16 @spill.DQuadReg(ptr %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DQuadReg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ld4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
; CHECK-NEXT: cbz w1, .LBB2_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: umov w0, v0.h[0]
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_2: // %if.then
; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x8] // 64-byte Folded Spill
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
; CHECK-NEXT: ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x8] // 64-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: umov w0, v0.h[0]
; CHECK-NEXT: ret
entry:
; ld4 yields a four-vector struct that must survive until if.end.
%vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %arg1)
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %if.then, label %if.end

if.then:
; The call forces the register quad to be spilled and reloaded.
tail call void @foo()
br label %if.end

if.end:
; Return lane 0 of the first vector of the quad.
%vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
%res = extractelement <4 x i16> %vld.extract, i32 0
ret i16 %res
}
| |
; Spill/fill of an ld2 result using full Q registers (128-bit vectors),
; exercising the Q-register pair class across a call.
define i32 @spill.QPairReg(ptr %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QPairReg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ld2 { v0.4s, v1.4s }, [x0]
; CHECK-NEXT: cbz w1, .LBB3_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: mov w0, v0.s[1]
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB3_2: // %if.then
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1 { v0.2d, v1.2d }, [x8] // 32-byte Folded Spill
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: ld1 { v0.2d, v1.2d }, [x8] // 32-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: mov w0, v0.s[1]
; CHECK-NEXT: ret
entry:
; ld2 yields a two-vector struct of <4 x i32> (Q-sized) values.
%vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %arg1)
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %if.then, label %if.end

if.then:
; The call forces the Q-register pair to be spilled and reloaded.
tail call void @foo()
br label %if.end

if.end:
; Return lane 1 of the first vector of the pair.
%vld.extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
%res = extractelement <4 x i32> %vld.extract, i32 1
ret i32 %res
}
| |
; Spill/fill of an ld3 result using full Q registers (float vectors),
; exercising the Q-register triple class across a call.
define float @spill.QTripleReg(ptr %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QTripleReg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
; CHECK-NEXT: cbz w1, .LBB4_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: mov s0, v0.s[1]
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB4_2: // %if.then
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1 { v0.2d, v1.2d, v2.2d }, [x8] // 48-byte Folded Spill
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload
; CHECK-NEXT: ld1 { v0.2d, v1.2d, v2.2d }, [x8] // 48-byte Folded Reload
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: mov s0, v0.s[1]
; CHECK-NEXT: ret
entry:
; ld3 yields a three-vector struct of <4 x float> (Q-sized) values.
%vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr %arg1)
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %if.then, label %if.end

if.then:
; The call forces the Q-register triple to be spilled and reloaded.
tail call void @foo()
br label %if.end

if.end:
; Return lane 1 of the first vector of the triple.
%vld3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
%res = extractelement <4 x float> %vld3.extract, i32 1
ret float %res
}
| |
; Spill/fill of an ld4 result using full Q registers (byte vectors),
; exercising the Q-register quad class across a call.
define i8 @spill.QQuadReg(ptr %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QQuadReg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ld4 { v0.16b, v1.16b, v2.16b, v3.16b }, [x0]
; CHECK-NEXT: cbz w1, .LBB5_2
; CHECK-NEXT: // %bb.1: // %if.end
; CHECK-NEXT: umov w0, v0.b[1]
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB5_2: // %if.then
; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x8] // 64-byte Folded Spill
; CHECK-NEXT: bl foo
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
; CHECK-NEXT: ld1 { v0.2d, v1.2d, v2.2d, v3.2d }, [x8] // 64-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: umov w0, v0.b[1]
; CHECK-NEXT: ret
entry:
; ld4 yields a four-vector struct of <16 x i8> (Q-sized) values.
%vld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %arg1)
%cmp = icmp eq i32 %arg2, 0
br i1 %cmp, label %if.then, label %if.end

if.then:
; The call forces the Q-register quad to be spilled and reloaded.
tail call void @foo()
br label %if.end

if.end:
; Return lane 1 of the first vector of the quad.
%vld.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld, 0
%res = extractelement <16 x i8> %vld.extract, i32 1
ret i8 %res
}
| |
| declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr) |
| declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr) |
| declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr) |
| declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr) |
| declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr) |
| declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr) |
| |
| declare void @foo() |
| |
; FIXME: We should not generate ld/st for such register spill/fill, because
; this test case is very simple and the register pressure is not high. If the
; spill/fill algorithm is improved, this test case may no longer be triggered,
; and it can then be deleted.
| ; check the spill for Register Class QPair_with_qsub_0_in_FPR128Lo |
define <8 x i16> @test_2xFPR128Lo(i64 %got, ptr %ptr, <1 x i64> %a) {
; CHECK-LABEL: test_2xFPR128Lo:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: st2 { v0.d, v1.d }[0], [x1]
; CHECK-NEXT: bl foo
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
; st2lane consumes a two-vector <1 x i64> tuple, which per the comment above
; lives in the QPair_with_qsub_0_in_FPR128Lo register class.
tail call void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, ptr %ptr)
tail call void @foo()
; %sv = <0, a[0]>; the splatted lane 2 of its i16 view lies in the zero
; half, so the whole result folds to zero (hence the movi after the call).
%sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
%1 = bitcast <2 x i64> %sv to <8 x i16>
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%3 = mul <8 x i16> %2, %2
ret <8 x i16> %3
}
| |
| ; check the spill for Register Class QTriple_with_qsub_0_in_FPR128Lo |
define <8 x i16> @test_3xFPR128Lo(i64 %got, ptr %ptr, <1 x i64> %a) {
; CHECK-LABEL: test_3xFPR128Lo:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v2.16b, v0.16b
; CHECK-NEXT: st3 { v0.d, v1.d, v2.d }[0], [x1]
; CHECK-NEXT: bl foo
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
; st3lane consumes a three-vector <1 x i64> tuple, which per the comment
; above lives in the QTriple_with_qsub_0_in_FPR128Lo register class.
tail call void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, ptr %ptr)
tail call void @foo()
; %sv = <0, a[0]>; the splatted lane 2 of its i16 view lies in the zero
; half, so the whole result folds to zero (hence the movi after the call).
%sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
%1 = bitcast <2 x i64> %sv to <8 x i16>
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%3 = mul <8 x i16> %2, %2
ret <8 x i16> %3
}
| |
| ; check the spill for Register Class QQuad_with_qsub_0_in_FPR128Lo |
define <8 x i16> @test_4xFPR128Lo(i64 %got, ptr %ptr, <1 x i64> %a) {
; CHECK-LABEL: test_4xFPR128Lo:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v2.16b, v0.16b
; CHECK-NEXT: mov v3.16b, v0.16b
; CHECK-NEXT: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x1]
; CHECK-NEXT: bl foo
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
; st4lane consumes a four-vector <1 x i64> tuple, which per the comment
; above lives in the QQuad_with_qsub_0_in_FPR128Lo register class.
tail call void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, ptr %ptr)
tail call void @foo()
; %sv = <0, a[0]>; the splatted lane 2 of its i16 view lies in the zero
; half, so the whole result folds to zero (hence the movi after the call).
%sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
%1 = bitcast <2 x i64> %sv to <8 x i16>
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%3 = mul <8 x i16> %2, %2
ret <8 x i16> %3
}
| |
| declare void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr) |
| declare void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, i64, ptr) |
| declare void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, ptr) |