| ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py |
| ; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL |
| |
; Small aggregate types used to exercise IRTranslator struct-return ABI
; lowering: single-float, single/double-double, and 1..4 x i32 structs.
%struct.f1 = type { float }
%struct.d1 = type { double }
%struct.d2 = type { double, double }
%struct.i1 = type { i32 }
%struct.i2 = type { i32, i32 }
%struct.i3 = type { i32, i32, i32 }
%struct.i4 = type { i32, i32, i32, i32 }
| |
; Return a %struct.f1 (single float) by value: the coerced float argument is
; stored into local %f, copied to %retval via llvm.memcpy, then reloaded and
; returned. The autogenerated CHECK lines verify the IRTranslator output:
; the argument arrives in $xmm0 as s128 and is G_TRUNC'd to s32, and the
; memcpy intrinsic is lowered to a direct libcall to &memcpy.
define float @test_return_f1(float %f.coerce) {
; ALL-LABEL: name: test_return_f1
; ALL: bb.1.entry:
; ALL: liveins: $xmm0
; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
; ALL: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.f
; ALL: G_STORE [[TRUNC]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13)
; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32)
; ALL: $xmm0 = COPY [[ANYEXT]](s128)
; ALL: RET 0, implicit $xmm0
entry:
  %retval = alloca %struct.f1, align 4                  ; on-stack return slot
  %f = alloca %struct.f1, align 4                       ; local copy of the argument
  %coerce.dive = getelementptr inbounds %struct.f1, %struct.f1* %f, i32 0, i32 0
  store float %f.coerce, float* %coerce.dive, align 4   ; spill coerced arg into %f
  %0 = bitcast %struct.f1* %retval to i8*
  %1 = bitcast %struct.f1* %f to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
  %coerce.dive1 = getelementptr inbounds %struct.f1, %struct.f1* %retval, i32 0, i32 0
  %2 = load float, float* %coerce.dive1, align 4        ; reload result from %retval
  ret float %2
}
| |
| declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1 |
| |
; Return a %struct.d1 (single double) by value. Same shape as test_return_f1
; but with 8-byte element: $xmm0 arrives as s128 and is G_TRUNC'd to s64,
; and the 8-byte memcpy becomes a &memcpy libcall.
define double @test_return_d1(double %d.coerce) {
; ALL-LABEL: name: test_return_d1
; ALL: bb.1.entry:
; ALL: liveins: $xmm0
; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
; ALL: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.coerce.dive13)
; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
; ALL: $xmm0 = COPY [[ANYEXT]](s128)
; ALL: RET 0, implicit $xmm0
entry:
  %retval = alloca %struct.d1, align 8                  ; on-stack return slot
  %d = alloca %struct.d1, align 8                       ; local copy of the argument
  %coerce.dive = getelementptr inbounds %struct.d1, %struct.d1* %d, i32 0, i32 0
  store double %d.coerce, double* %coerce.dive, align 8 ; spill coerced arg into %d
  %0 = bitcast %struct.d1* %retval to i8*
  %1 = bitcast %struct.d1* %d to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 8, i1 false)
  %coerce.dive1 = getelementptr inbounds %struct.d1, %struct.d1* %retval, i32 0, i32 0
  %2 = load double, double* %coerce.dive1, align 8      ; reload result from %retval
  ret double %2
}
| |
; Return a %struct.d2 (two doubles) by value. The two coerced doubles arrive
; in $xmm0/$xmm1, are stored 8 bytes apart into %d (second store through a
; G_GEP at offset 8), the 16-byte memcpy becomes a &memcpy libcall, and the
; two-element result is split and returned in $xmm0 and $xmm1.
define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1) {
; ALL-LABEL: name: test_return_d2
; ALL: bb.1.entry:
; ALL: liveins: $xmm0, $xmm1
; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
; ALL: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
; ALL: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
; ALL: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.1)
; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C]](s64)
; ALL: G_STORE [[TRUNC1]](s64), [[GEP]](p0) :: (store 8 into %ir.2)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C1]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5)
; ALL: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C2]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8)
; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
; ALL: $xmm0 = COPY [[ANYEXT]](s128)
; ALL: [[ANYEXT1:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD1]](s64)
; ALL: $xmm1 = COPY [[ANYEXT1]](s128)
; ALL: RET 0, implicit $xmm0, implicit $xmm1
entry:
  %retval = alloca %struct.d2, align 8                  ; on-stack return slot
  %d = alloca %struct.d2, align 8                       ; local copy of the argument
  %0 = bitcast %struct.d2* %d to { double, double }*
  %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0
  store double %d.coerce0, double* %1, align 8          ; first double -> %d[0]
  %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1
  store double %d.coerce1, double* %2, align 8          ; second double -> %d[1]
  %3 = bitcast %struct.d2* %retval to i8*
  %4 = bitcast %struct.d2* %d to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %3, i8* align 8 %4, i64 16, i1 false)
  %5 = bitcast %struct.d2* %retval to { double, double }*
  %6 = load { double, double }, { double, double }* %5, align 8 ; reload aggregate result
  ret { double, double } %6
}
| |
; Return a %struct.i1 (single i32) by value. The integer case needs no
; trunc/anyext: the arg arrives in $edi as s32, is stored to %i, copied to
; %retval via a &memcpy libcall, then reloaded and returned in $eax.
define i32 @test_return_i1(i32 %i.coerce) {
; ALL-LABEL: name: test_return_i1
; ALL: bb.1.entry:
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: G_STORE [[COPY]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13)
; ALL: $eax = COPY [[LOAD]](s32)
; ALL: RET 0, implicit $eax
entry:
  %retval = alloca %struct.i1, align 4                  ; on-stack return slot
  %i = alloca %struct.i1, align 4                       ; local copy of the argument
  %coerce.dive = getelementptr inbounds %struct.i1, %struct.i1* %i, i32 0, i32 0
  store i32 %i.coerce, i32* %coerce.dive, align 4       ; spill coerced arg into %i
  %0 = bitcast %struct.i1* %retval to i8*
  %1 = bitcast %struct.i1* %i to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
  %coerce.dive1 = getelementptr inbounds %struct.i1, %struct.i1* %retval, i32 0, i32 0
  %2 = load i32, i32* %coerce.dive1, align 4            ; reload result from %retval
  ret i32 %2
}
| |
; Return a %struct.i2 (two i32s) coerced to a single i64. The struct is only
; 4-byte aligned, so the CHECK lines verify the 8-byte store/load carry an
; explicit "align 4" in their memory operands. Arg in $rdi, result in $rax.
define i64 @test_return_i2(i64 %i.coerce) {
; ALL-LABEL: name: test_return_i2
; ALL: bb.1.entry:
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.0, align 4)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.3, align 4)
; ALL: $rax = COPY [[LOAD]](s64)
; ALL: RET 0, implicit $rax
entry:
  %retval = alloca %struct.i2, align 4                  ; on-stack return slot
  %i = alloca %struct.i2, align 4                       ; local copy of the argument
  %0 = bitcast %struct.i2* %i to i64*
  store i64 %i.coerce, i64* %0, align 4                 ; under-aligned 8-byte store
  %1 = bitcast %struct.i2* %retval to i8*
  %2 = bitcast %struct.i2* %i to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false)
  %3 = bitcast %struct.i2* %retval to i64*
  %4 = load i64, i64* %3, align 4                       ; under-aligned 8-byte reload
  ret i64 %4
}
| |
; Return a 12-byte %struct.i3 coerced to { i64, i32 }. This is the most
; involved case: args arrive in $rdi/$esi and are stored into %coerce, then
; three 12-byte memcpys chain the value through %i -> %retval -> %tmp (each
; lowered to a &memcpy libcall between ADJCALLSTACKDOWN64/UP64 pairs).
; The final aggregate load from %tmp is split into an s64 and an s32 and
; returned in $rax and $edx.
define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) {
; ALL-LABEL: name: test_return_i3
; ALL: bb.1.entry:
; ALL: liveins: $esi, $rdi
; ALL: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.2.coerce
; ALL: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3.tmp
; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX2]](p0) :: (store 8 into %ir.0, align 4)
; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX2]], [[C]](s64)
; ALL: G_STORE [[COPY1]](s32), [[GEP]](p0) :: (store 4 into %ir.1)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX2]](p0)
; ALL: $rdx = COPY [[C1]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C1]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX3]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX]](p0)
; ALL: $rdx = COPY [[C1]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 8 from %ir.tmp)
; ALL: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX3]], [[C2]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.tmp + 8, align 8)
; ALL: $rax = COPY [[LOAD]](s64)
; ALL: $edx = COPY [[LOAD1]](s32)
; ALL: RET 0, implicit $rax, implicit $edx
entry:
  %retval = alloca %struct.i3, align 4                  ; on-stack return slot
  %i = alloca %struct.i3, align 4                       ; local copy of the argument
  %coerce = alloca { i64, i32 }, align 4                ; coercion buffer for the args
  %tmp = alloca { i64, i32 }, align 8                   ; coercion buffer for the return
  %0 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 0
  store i64 %i.coerce0, i64* %0, align 4                ; first arg -> %coerce[0]
  %1 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 1
  store i32 %i.coerce1, i32* %1, align 4                ; second arg -> %coerce[1]
  %2 = bitcast %struct.i3* %i to i8*
  %3 = bitcast { i64, i32 }* %coerce to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 12, i1 false)
  %4 = bitcast %struct.i3* %retval to i8*
  %5 = bitcast %struct.i3* %i to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 12, i1 false)
  %6 = bitcast { i64, i32 }* %tmp to i8*
  %7 = bitcast %struct.i3* %retval to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %6, i8* align 4 %7, i64 12, i1 false)
  %8 = load { i64, i32 }, { i64, i32 }* %tmp, align 8   ; reload aggregate result
  ret { i64, i32 } %8
}
| |
; Return a 16-byte %struct.i4 coerced to { i64, i64 }. The two i64 halves
; arrive in $rdi/$rsi and are stored 8 bytes apart into %i (the struct is
; only 4-byte aligned, hence "align 4" on the 8-byte memory operands); the
; 16-byte memcpy becomes a &memcpy libcall and the result is returned in
; $rax and $rdx.
define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
; ALL-LABEL: name: test_return_i4
; ALL: bb.1.entry:
; ALL: liveins: $rdi, $rsi
; ALL: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.1, align 4)
; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C]](s64)
; ALL: G_STORE [[COPY1]](s64), [[GEP]](p0) :: (store 8 into %ir.2, align 4)
; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
; ALL: $rdx = COPY [[C1]](s64)
; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5, align 4)
; ALL: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C2]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8, align 4)
; ALL: $rax = COPY [[LOAD]](s64)
; ALL: $rdx = COPY [[LOAD1]](s64)
; ALL: RET 0, implicit $rax, implicit $rdx
entry:
  %retval = alloca %struct.i4, align 4                  ; on-stack return slot
  %i = alloca %struct.i4, align 4                       ; local copy of the argument
  %0 = bitcast %struct.i4* %i to { i64, i64 }*
  %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
  store i64 %i.coerce0, i64* %1, align 4                ; first half -> %i[0]
  %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
  store i64 %i.coerce1, i64* %2, align 4                ; second half -> %i[1]
  %3 = bitcast %struct.i4* %retval to i8*
  %4 = bitcast %struct.i4* %i to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 %4, i64 16, i1 false)
  %5 = bitcast %struct.i4* %retval to { i64, i64 }*
  %6 = load { i64, i64 }, { i64, i64 }* %5, align 4     ; reload aggregate result
  ret { i64, i64 } %6
}