; blob: 4f58eb4c6cb209bcf66067a98adbdfdf0f87b14b [file] [log] [blame]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=zEC12 -verify-machineinstrs \
; RUN: | FileCheck %s --check-prefix=NOVEC
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z16 -verify-machineinstrs \
; RUN: | FileCheck %s --check-prefix=VECTOR
;
; Tests for strict 16-bit floating point (half).
declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata)
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata)
; Test register addition.
; fun0: strict half + half. s390x has no native f16 arithmetic, so codegen
; promotes both operands to float via the __extendhfsf2 libcall, adds with
; aebr, and narrows back with __truncsfhf2. The NOVEC (zEC12) and VECTOR
; (z16) paths differ only in the register-copy mnemonic (ler vs ldr).
define half @fun0(half %f1, half %f2) #0 {
; NOVEC-LABEL: fun0:
; NOVEC: # %bb.0:
; NOVEC-NEXT: stmg %r14, %r15, 112(%r15)
; NOVEC-NEXT: .cfi_offset %r14, -48
; NOVEC-NEXT: .cfi_offset %r15, -40
; NOVEC-NEXT: aghi %r15, -176
; NOVEC-NEXT: .cfi_def_cfa_offset 336
; NOVEC-NEXT: std %f8, 168(%r15) # 8-byte Spill
; NOVEC-NEXT: std %f9, 160(%r15) # 8-byte Spill
; NOVEC-NEXT: .cfi_offset %f8, -168
; NOVEC-NEXT: .cfi_offset %f9, -176
; NOVEC-NEXT: ler %f8, %f0
; NOVEC-NEXT: ler %f0, %f2
; NOVEC-NEXT: brasl %r14, __extendhfsf2@PLT
; NOVEC-NEXT: ler %f9, %f0
; NOVEC-NEXT: ler %f0, %f8
; NOVEC-NEXT: brasl %r14, __extendhfsf2@PLT
; NOVEC-NEXT: aebr %f0, %f9
; NOVEC-NEXT: brasl %r14, __truncsfhf2@PLT
; NOVEC-NEXT: ld %f8, 168(%r15) # 8-byte Reload
; NOVEC-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; NOVEC-NEXT: lmg %r14, %r15, 288(%r15)
; NOVEC-NEXT: br %r14
;
; VECTOR-LABEL: fun0:
; VECTOR: # %bb.0:
; VECTOR-NEXT: stmg %r14, %r15, 112(%r15)
; VECTOR-NEXT: .cfi_offset %r14, -48
; VECTOR-NEXT: .cfi_offset %r15, -40
; VECTOR-NEXT: aghi %r15, -176
; VECTOR-NEXT: .cfi_def_cfa_offset 336
; VECTOR-NEXT: std %f8, 168(%r15) # 8-byte Spill
; VECTOR-NEXT: std %f9, 160(%r15) # 8-byte Spill
; VECTOR-NEXT: .cfi_offset %f8, -168
; VECTOR-NEXT: .cfi_offset %f9, -176
; VECTOR-NEXT: ldr %f8, %f0
; VECTOR-NEXT: ldr %f0, %f2
; VECTOR-NEXT: brasl %r14, __extendhfsf2@PLT
; VECTOR-NEXT: ldr %f9, %f0
; VECTOR-NEXT: ldr %f0, %f8
; VECTOR-NEXT: brasl %r14, __extendhfsf2@PLT
; VECTOR-NEXT: aebr %f0, %f9
; VECTOR-NEXT: brasl %r14, __truncsfhf2@PLT
; VECTOR-NEXT: ld %f8, 168(%r15) # 8-byte Reload
; VECTOR-NEXT: ld %f9, 160(%r15) # 8-byte Reload
; VECTOR-NEXT: lmg %r14, %r15, 288(%r15)
; VECTOR-NEXT: br %r14
; Constrained (strictfp) add with dynamic rounding and strict exception
; semantics; the #0 call-site attribute keeps it strictfp.
%res = call half @llvm.experimental.constrained.fadd.f16(
half %f1, half %f2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret half %res
}
; Test atomic memory accesses and extension/truncation inside a strictfp
; function.
; fun1: seq_cst atomic half load/store plus fpext/fptrunc in a strictfp
; function. NOVEC materializes the half via integer load + sllg/ldgr bit
; moves; VECTOR uses vlreph/vsteh element load/store. The widening/narrowing
; goes through __extendhfdf2/__truncdfhf2 libcalls, and the trailing
; "bcr 14, %r0" after the store is presumably the serialization (fence) for
; the seq_cst ordering — confirm against SystemZ memory-model lowering.
define void @fun1(ptr %Src, ptr %Dst) #0 {
; NOVEC-LABEL: fun1:
; NOVEC: # %bb.0: # %entry
; NOVEC-NEXT: stmg %r13, %r15, 104(%r15)
; NOVEC-NEXT: .cfi_offset %r13, -56
; NOVEC-NEXT: .cfi_offset %r14, -48
; NOVEC-NEXT: .cfi_offset %r15, -40
; NOVEC-NEXT: aghi %r15, -160
; NOVEC-NEXT: .cfi_def_cfa_offset 320
; NOVEC-NEXT: lgh %r0, 0(%r2)
; NOVEC-NEXT: sllg %r0, %r0, 48
; NOVEC-NEXT: lgr %r13, %r3
; NOVEC-NEXT: ldgr %f0, %r0
; NOVEC-NEXT: # kill: def $f0h killed $f0h killed $f0d
; NOVEC-NEXT: brasl %r14, __extendhfdf2@PLT
; NOVEC-NEXT: adbr %f0, %f0
; NOVEC-NEXT: brasl %r14, __truncdfhf2@PLT
; NOVEC-NEXT: # kill: def $f0h killed $f0h def $f0d
; NOVEC-NEXT: lgdr %r0, %f0
; NOVEC-NEXT: srlg %r0, %r0, 48
; NOVEC-NEXT: sth %r0, 0(%r13)
; NOVEC-NEXT: bcr 14, %r0
; NOVEC-NEXT: lmg %r13, %r15, 264(%r15)
; NOVEC-NEXT: br %r14
;
; VECTOR-LABEL: fun1:
; VECTOR: # %bb.0: # %entry
; VECTOR-NEXT: stmg %r13, %r15, 104(%r15)
; VECTOR-NEXT: .cfi_offset %r13, -56
; VECTOR-NEXT: .cfi_offset %r14, -48
; VECTOR-NEXT: .cfi_offset %r15, -40
; VECTOR-NEXT: aghi %r15, -160
; VECTOR-NEXT: .cfi_def_cfa_offset 320
; VECTOR-NEXT: vlreph %v0, 0(%r2)
; VECTOR-NEXT: lgr %r13, %r3
; VECTOR-NEXT: brasl %r14, __extendhfdf2@PLT
; VECTOR-NEXT: adbr %f0, %f0
; VECTOR-NEXT: brasl %r14, __truncdfhf2@PLT
; VECTOR-NEXT: vsteh %v0, 0(%r13), 0
; VECTOR-NEXT: bcr 14, %r0
; VECTOR-NEXT: lmg %r13, %r15, 264(%r15)
; VECTOR-NEXT: br %r14
entry:
; Atomic seq_cst half load; naturally aligned (align 2).
%Op0 = load atomic half, ptr %Src seq_cst, align 2
%E0 = fpext half %Op0 to double
; Strict double add of the value with itself (doubles it).
%Add = call double @llvm.experimental.constrained.fadd.f64(
double %E0, double %E0,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
%Res = fptrunc double %Add to half
store atomic half %Res, ptr %Dst seq_cst, align 2
ret void
}
; Test a chain of half operations which should have each operation surrounded
; by conversions to/from fp32 for proper emulation.
; fun2: two chained strict half multiplies. Each f16 operation is bracketed
; by __extendhfsf2 / __truncsfhf2 so the emulation rounds through half after
; every step (no fusion across the chain). NOVEC performs both products with
; meebr; VECTOR performs the second with wfmsb, the vector-facility
; single-precision multiply.
define half @fun2(half %Op0, half %Op1, half %Op2) #0 {
; NOVEC-LABEL: fun2:
; NOVEC: # %bb.0: # %entry
; NOVEC-NEXT: stmg %r14, %r15, 112(%r15)
; NOVEC-NEXT: .cfi_offset %r14, -48
; NOVEC-NEXT: .cfi_offset %r15, -40
; NOVEC-NEXT: aghi %r15, -184
; NOVEC-NEXT: .cfi_def_cfa_offset 344
; NOVEC-NEXT: std %f8, 176(%r15) # 8-byte Spill
; NOVEC-NEXT: std %f9, 168(%r15) # 8-byte Spill
; NOVEC-NEXT: std %f10, 160(%r15) # 8-byte Spill
; NOVEC-NEXT: .cfi_offset %f8, -168
; NOVEC-NEXT: .cfi_offset %f9, -176
; NOVEC-NEXT: .cfi_offset %f10, -184
; NOVEC-NEXT: ler %f9, %f0
; NOVEC-NEXT: ler %f0, %f2
; NOVEC-NEXT: ler %f8, %f4
; NOVEC-NEXT: brasl %r14, __extendhfsf2@PLT
; NOVEC-NEXT: ler %f10, %f0
; NOVEC-NEXT: ler %f0, %f9
; NOVEC-NEXT: brasl %r14, __extendhfsf2@PLT
; NOVEC-NEXT: meebr %f0, %f10
; NOVEC-NEXT: brasl %r14, __truncsfhf2@PLT
; NOVEC-NEXT: brasl %r14, __extendhfsf2@PLT
; NOVEC-NEXT: ler %f9, %f0
; NOVEC-NEXT: ler %f0, %f8
; NOVEC-NEXT: brasl %r14, __extendhfsf2@PLT
; NOVEC-NEXT: meebr %f0, %f9
; NOVEC-NEXT: brasl %r14, __truncsfhf2@PLT
; NOVEC-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; NOVEC-NEXT: ld %f9, 168(%r15) # 8-byte Reload
; NOVEC-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; NOVEC-NEXT: lmg %r14, %r15, 296(%r15)
; NOVEC-NEXT: br %r14
;
; VECTOR-LABEL: fun2:
; VECTOR: # %bb.0: # %entry
; VECTOR-NEXT: stmg %r14, %r15, 112(%r15)
; VECTOR-NEXT: .cfi_offset %r14, -48
; VECTOR-NEXT: .cfi_offset %r15, -40
; VECTOR-NEXT: aghi %r15, -184
; VECTOR-NEXT: .cfi_def_cfa_offset 344
; VECTOR-NEXT: std %f8, 176(%r15) # 8-byte Spill
; VECTOR-NEXT: std %f9, 168(%r15) # 8-byte Spill
; VECTOR-NEXT: std %f10, 160(%r15) # 8-byte Spill
; VECTOR-NEXT: .cfi_offset %f8, -168
; VECTOR-NEXT: .cfi_offset %f9, -176
; VECTOR-NEXT: .cfi_offset %f10, -184
; VECTOR-NEXT: ldr %f9, %f0
; VECTOR-NEXT: ldr %f0, %f2
; VECTOR-NEXT: ldr %f8, %f4
; VECTOR-NEXT: brasl %r14, __extendhfsf2@PLT
; VECTOR-NEXT: ldr %f10, %f0
; VECTOR-NEXT: ldr %f0, %f9
; VECTOR-NEXT: brasl %r14, __extendhfsf2@PLT
; VECTOR-NEXT: meebr %f0, %f10
; VECTOR-NEXT: brasl %r14, __truncsfhf2@PLT
; VECTOR-NEXT: brasl %r14, __extendhfsf2@PLT
; VECTOR-NEXT: ldr %f9, %f0
; VECTOR-NEXT: ldr %f0, %f8
; VECTOR-NEXT: brasl %r14, __extendhfsf2@PLT
; VECTOR-NEXT: wfmsb %f0, %f9, %f0
; VECTOR-NEXT: brasl %r14, __truncsfhf2@PLT
; VECTOR-NEXT: ld %f8, 176(%r15) # 8-byte Reload
; VECTOR-NEXT: ld %f9, 168(%r15) # 8-byte Reload
; VECTOR-NEXT: ld %f10, 160(%r15) # 8-byte Reload
; VECTOR-NEXT: lmg %r14, %r15, 296(%r15)
; VECTOR-NEXT: br %r14
entry:
; First strict product: Op0 * Op1.
%A0 = call half @llvm.experimental.constrained.fmul.f16(
half %Op0, half %Op1,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
; Second strict product: (Op0 * Op1) * Op2, consuming the first result.
%Res = call half @llvm.experimental.constrained.fmul.f16(
half %A0, half %Op2,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret half %Res
}
attributes #0 = { strictfp }