| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 |
| // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ |
| // RUN: -disable-O0-optnone \ |
| // RUN: -flax-vector-conversions=none -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa \ |
| // RUN: | FileCheck --check-prefixes=UNCONSTRAINED %s |
| // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ |
| // RUN: -disable-O0-optnone \ |
| // RUN: -ffp-exception-behavior=strict \ |
| // RUN: -flax-vector-conversions=none -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa \ |
| // RUN: | FileCheck --check-prefixes=CONSTRAINED %s |
| |
| // REQUIRES: aarch64-registered-target |
| |
// Test the new AArch64 NEON intrinsics and types under both the default and
// constrained floating-point semantics.
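// The UNCONSTRAINED prefix checks the default lowering to plain fadd/fsub/fmul/
// fdiv/fcmp and llvm.fma; the CONSTRAINED prefix (built with
// -ffp-exception-behavior=strict) checks that the same intrinsics lower to the
// corresponding llvm.experimental.constrained.* calls carrying
// !"round.tonearest" and !"fpexcept.strict" metadata.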
| |
| #include <arm_neon.h> |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vadd_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0:[0-9]+]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vadd_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0:[0-9]+]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3:[0-9]+]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[ADD_I]] |
| // |
| float32x2_t test_vadd_f32(float32x2_t v1, float32x2_t v2) { |
| return vadd_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vaddq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vaddq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[ADD_I]] |
| // |
| float32x4_t test_vaddq_f32(float32x4_t v1, float32x4_t v2) { |
| return vaddq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vsub_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vsub_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[SUB_I]] |
| // |
| float32x2_t test_vsub_f32(float32x2_t v1, float32x2_t v2) { |
| return vsub_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vsubq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vsubq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[SUB_I]] |
| // |
| float32x4_t test_vsubq_f32(float32x4_t v1, float32x4_t v2) { |
| return vsubq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vsubq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vsubq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[SUB_I]] |
| // |
| float64x2_t test_vsubq_f64(float64x2_t v1, float64x2_t v2) { |
| return vsubq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vmul_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[MUL_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vmul_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[MUL_I]] |
| // |
| float32x2_t test_vmul_f32(float32x2_t v1, float32x2_t v2) { |
| return vmul_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vmulq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[MUL_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vmulq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[MUL_I]] |
| // |
| float32x4_t test_vmulq_f32(float32x4_t v1, float32x4_t v2) { |
| return vmulq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vmulq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[MUL_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vmulq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[MUL_I]] |
| // |
| float64x2_t test_vmulq_f64(float64x2_t v1, float64x2_t v2) { |
| return vmulq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vmla_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <2 x float> [[V2]], [[V3]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <2 x float> [[V1]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vmla_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> [[V2]], <2 x float> [[V3]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[V1]], <2 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[ADD_I]] |
| // |
| float32x2_t test_vmla_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { |
| return vmla_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vmlaq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <4 x float> [[V2]], [[V3]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <4 x float> [[V1]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vmlaq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> [[V2]], <4 x float> [[V3]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> [[V1]], <4 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[ADD_I]] |
| // |
| float32x4_t test_vmlaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { |
| return vmlaq_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vmlaq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <2 x double> [[V2]], [[V3]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <2 x double> [[V1]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vmlaq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[V2]], <2 x double> [[V3]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> [[V1]], <2 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[ADD_I]] |
| // |
| float64x2_t test_vmlaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { |
| return vmlaq_f64(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vmls_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <2 x float> [[V2]], [[V3]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <2 x float> [[V1]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vmls_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> [[V2]], <2 x float> [[V3]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> [[V1]], <2 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[SUB_I]] |
| // |
| float32x2_t test_vmls_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { |
| return vmls_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vmlsq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <4 x float> [[V2]], [[V3]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <4 x float> [[V1]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vmlsq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> [[V2]], <4 x float> [[V3]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> [[V1]], <4 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[SUB_I]] |
| // |
| float32x4_t test_vmlsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { |
| return vmlsq_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vmlsq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <2 x double> [[V2]], [[V3]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <2 x double> [[V1]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vmlsq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[V2]], <2 x double> [[V3]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> [[V1]], <2 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[SUB_I]] |
| // |
| float64x2_t test_vmlsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { |
| return vmlsq_f64(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vfma_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[V3]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> [[TMP7]], <2 x float> [[TMP8]], <2 x float> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vfma_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[V3]] to <2 x i32> |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> [[TMP7]], <2 x float> [[TMP8]], <2 x float> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[TMP9]] |
| // |
| float32x2_t test_vfma_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { |
| return vfma_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vfmaq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[V3]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x float> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vfmaq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[V3]] to <4 x i32> |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x float> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[TMP9]] |
| // |
| float32x4_t test_vfmaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { |
| return vfmaq_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vfmaq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x double> [[V3]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x double> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]], <2 x double> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vfmaq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x double> [[V3]] to <2 x i64> |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x double> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]], <2 x double> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[TMP9]] |
| // |
| float64x2_t test_vfmaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { |
| return vfmaq_f64(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vfms_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <2 x float> [[V2]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[FNEG_I]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[V3]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> [[TMP7]], <2 x float> [[TMP8]], <2 x float> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vfms_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]], <2 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <2 x float> [[V2]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[FNEG_I]] to <2 x i32> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[V3]] to <2 x i32> |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> [[TMP7]], <2 x float> [[TMP8]], <2 x float> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[TMP9]] |
| // |
| float32x2_t test_vfms_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) { |
| return vfms_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vfmsq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <4 x float> [[V2]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[FNEG_I]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[V3]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x float> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vfmsq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]], <4 x float> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <4 x float> [[V2]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[FNEG_I]] to <4 x i32> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[V3]] to <4 x i32> |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x float> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[TMP9]] |
| // |
| float32x4_t test_vfmsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) { |
| return vfmsq_f32(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vfmsq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <2 x double> [[V2]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[FNEG_I]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x double> [[V3]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to <16 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x double> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]], <2 x double> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vfmsq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]], <2 x double> noundef [[V3:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <2 x double> [[V2]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[FNEG_I]] to <2 x i64> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <2 x double> [[V3]] to <2 x i64> |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to <16 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <2 x double> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x double> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]], <2 x double> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[TMP9]] |
| // |
| float64x2_t test_vfmsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) { |
| return vfmsq_f64(v1, v2, v3); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x double> @test_vdivq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[DIV_I:%.*]] = fdiv <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x double> [[DIV_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x double> @test_vdivq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[DIV_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x double> [[DIV_I]] |
| // |
| float64x2_t test_vdivq_f64(float64x2_t v1, float64x2_t v2) { |
| return vdivq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x float> @test_vdivq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[DIV_I:%.*]] = fdiv <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <4 x float> [[DIV_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x float> @test_vdivq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[DIV_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <4 x float> [[DIV_I]] |
| // |
| float32x4_t test_vdivq_f32(float32x4_t v1, float32x4_t v2) { |
| return vdivq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x float> @test_vdiv_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[DIV_I:%.*]] = fdiv <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: ret <2 x float> [[DIV_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x float> @test_vdiv_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[DIV_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <2 x float> [[DIV_I]] |
| // |
| float32x2_t test_vdiv_f32(float32x2_t v1, float32x2_t v2) { |
| return vdiv_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i32> @test_vceq_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oeq <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i32> @test_vceq_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // CONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| uint32x2_t test_vceq_f32(float32x2_t v1, float32x2_t v2) { |
| return vceq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vceq_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oeq <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vceq_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // CONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| uint64x1_t test_vceq_f64(float64x1_t a, float64x1_t b) { |
| return vceq_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x i32> @test_vceqq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oeq <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x i32> @test_vceqq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // CONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| uint32x4_t test_vceqq_f32(float32x4_t v1, float32x4_t v2) { |
| return vceqq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i64> @test_vceqq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oeq <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i64> @test_vceqq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // CONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| uint64x2_t test_vceqq_f64(float64x2_t v1, float64x2_t v2) { |
| return vceqq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i32> @test_vcge_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oge <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i32> @test_vcge_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // CONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| uint32x2_t test_vcge_f32(float32x2_t v1, float32x2_t v2) { |
| return vcge_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcge_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oge <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcge_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // CONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| uint64x1_t test_vcge_f64(float64x1_t a, float64x1_t b) { |
| return vcge_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcgeq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oge <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcgeq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // CONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| uint32x4_t test_vcgeq_f32(float32x4_t v1, float32x4_t v2) { |
| return vcgeq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcgeq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp oge <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcgeq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // CONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| uint64x2_t test_vcgeq_f64(float64x2_t v1, float64x2_t v2) { |
| return vcgeq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i32> @test_vcle_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ole <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i32> @test_vcle_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // CONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| uint32x2_t test_vcle_f32(float32x2_t v1, float32x2_t v2) { |
| return vcle_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcle_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ole <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcle_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // CONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| uint64x1_t test_vcle_f64(float64x1_t a, float64x1_t b) { |
| return vcle_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcleq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ole <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcleq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // CONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| uint32x4_t test_vcleq_f32(float32x4_t v1, float32x4_t v2) { |
| return vcleq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcleq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ole <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcleq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // CONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| uint64x2_t test_vcleq_f64(float64x2_t v1, float64x2_t v2) { |
| return vcleq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i32> @test_vcgt_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ogt <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i32> @test_vcgt_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // CONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| uint32x2_t test_vcgt_f32(float32x2_t v1, float32x2_t v2) { |
| return vcgt_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcgt_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ogt <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcgt_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // CONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| uint64x1_t test_vcgt_f64(float64x1_t a, float64x1_t b) { |
| return vcgt_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcgtq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ogt <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcgtq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // CONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| uint32x4_t test_vcgtq_f32(float32x4_t v1, float32x4_t v2) { |
| return vcgtq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcgtq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp ogt <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcgtq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // CONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| uint64x2_t test_vcgtq_f64(float64x2_t v1, float64x2_t v2) { |
| return vcgtq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i32> @test_vclt_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp olt <2 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // UNCONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i32> @test_vclt_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> [[V1]], <2 x float> [[V2]], metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32> |
| // CONSTRAINED-NEXT: ret <2 x i32> [[SEXT_I]] |
| // |
| uint32x2_t test_vclt_f32(float32x2_t v1, float32x2_t v2) { |
| return vclt_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vclt_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp olt <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vclt_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> |
| // CONSTRAINED-NEXT: ret <1 x i64> [[SEXT_I]] |
| // |
| uint64x1_t test_vclt_f64(float64x1_t a, float64x1_t b) { |
| return vclt_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcltq_f32( |
| // UNCONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp olt <4 x float> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // UNCONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <4 x i32> @test_vcltq_f32( |
| // CONSTRAINED-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> [[V1]], <4 x float> [[V2]], metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32> |
| // CONSTRAINED-NEXT: ret <4 x i32> [[SEXT_I]] |
| // |
| uint32x4_t test_vcltq_f32(float32x4_t v1, float32x4_t v2) { |
| return vcltq_f32(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcltq_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[CMP_I:%.*]] = fcmp olt <2 x double> [[V1]], [[V2]] |
| // UNCONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // UNCONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <2 x i64> @test_vcltq_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> [[V1]], <2 x double> [[V2]], metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> |
| // CONSTRAINED-NEXT: ret <2 x i64> [[SEXT_I]] |
| // |
| uint64x2_t test_vcltq_f64(float64x2_t v1, float64x2_t v2) { |
| return vcltq_f64(v1, v2); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local float @test_vpadds_f32( |
| // UNCONSTRAINED-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[LANE0_I:%.*]] = extractelement <2 x float> [[A]], i64 0 |
| // UNCONSTRAINED-NEXT: [[LANE1_I:%.*]] = extractelement <2 x float> [[A]], i64 1 |
| // UNCONSTRAINED-NEXT: [[VPADDD_I:%.*]] = fadd float [[LANE0_I]], [[LANE1_I]] |
| // UNCONSTRAINED-NEXT: ret float [[VPADDD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local float @test_vpadds_f32( |
| // CONSTRAINED-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[LANE0_I:%.*]] = extractelement <2 x float> [[A]], i64 0 |
| // CONSTRAINED-NEXT: [[LANE1_I:%.*]] = extractelement <2 x float> [[A]], i64 1 |
| // CONSTRAINED-NEXT: [[VPADDD_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[LANE0_I]], float [[LANE1_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret float [[VPADDD_I]] |
| // |
| float32_t test_vpadds_f32(float32x2_t a) { |
| return vpadds_f32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local double @test_vpaddd_f64( |
| // UNCONSTRAINED-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[LANE0_I:%.*]] = extractelement <2 x double> [[A]], i64 0 |
| // UNCONSTRAINED-NEXT: [[LANE1_I:%.*]] = extractelement <2 x double> [[A]], i64 1 |
| // UNCONSTRAINED-NEXT: [[VPADDD_I:%.*]] = fadd double [[LANE0_I]], [[LANE1_I]] |
| // UNCONSTRAINED-NEXT: ret double [[VPADDD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local double @test_vpaddd_f64( |
| // CONSTRAINED-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[LANE0_I:%.*]] = extractelement <2 x double> [[A]], i64 0 |
| // CONSTRAINED-NEXT: [[LANE1_I:%.*]] = extractelement <2 x double> [[A]], i64 1 |
| // CONSTRAINED-NEXT: [[VPADDD_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[LANE0_I]], double [[LANE1_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret double [[VPADDD_I]] |
| // |
| float64_t test_vpaddd_f64(float64x2_t a) { |
| return vpaddd_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local float @test_vcvts_f32_s32( |
| // UNCONSTRAINED-SAME: i32 noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = sitofp i32 [[A]] to float |
| // UNCONSTRAINED-NEXT: ret float [[TMP0]] |
| // |
| // CONSTRAINED-LABEL: define dso_local float @test_vcvts_f32_s32( |
| // CONSTRAINED-SAME: i32 noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret float [[TMP0]] |
| // |
| float32_t test_vcvts_f32_s32(int32_t a) { |
| return vcvts_f32_s32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local double @test_vcvtd_f64_s64( |
| // UNCONSTRAINED-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = sitofp i64 [[A]] to double |
| // UNCONSTRAINED-NEXT: ret double [[TMP0]] |
| // |
| // CONSTRAINED-LABEL: define dso_local double @test_vcvtd_f64_s64( |
| // CONSTRAINED-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret double [[TMP0]] |
| // |
| float64_t test_vcvtd_f64_s64(int64_t a) { |
| return vcvtd_f64_s64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local float @test_vcvts_f32_u32( |
| // UNCONSTRAINED-SAME: i32 noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = uitofp i32 [[A]] to float |
| // UNCONSTRAINED-NEXT: ret float [[TMP0]] |
| // |
| // CONSTRAINED-LABEL: define dso_local float @test_vcvts_f32_u32( |
| // CONSTRAINED-SAME: i32 noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret float [[TMP0]] |
| // |
| float32_t test_vcvts_f32_u32(uint32_t a) { |
| return vcvts_f32_u32(a); |
| } |
| |
| // XXX: should also verify the types of the registers used |
| // UNCONSTRAINED-LABEL: define dso_local double @test_vcvtd_f64_u64( |
| // UNCONSTRAINED-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = uitofp i64 [[A]] to double |
| // UNCONSTRAINED-NEXT: ret double [[TMP0]] |
| // |
| // CONSTRAINED-LABEL: define dso_local double @test_vcvtd_f64_u64( |
| // CONSTRAINED-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret double [[TMP0]] |
| // |
| float64_t test_vcvtd_f64_u64(uint64_t a) { |
| return vcvtd_f64_u64(a); |
| } |
| |
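| // Illustrative sketch, not part of the autogenerated checks: under |
| // -ffp-exception-behavior=strict the scalar conversions above lower to |
| // llvm.experimental.constrained.sitofp/uitofp calls that carry the |
| // round.tonearest and fpexcept.strict metadata, so an inexact result from a |
| // wide integer input keeps its exception side effect. The helper below is |
| // hypothetical and unused (static inline), so it adds no IR to the checked |
| // output. |
| static inline float64_t example_scaled_convert(int64_t ticks, uint64_t hz) { |
|   // Both conversions and the division use strict exception semantics. |
|   return vcvtd_f64_s64(ticks) / vcvtd_f64_u64(hz); |
| } |
| |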
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vceqs_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oeq float [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vceqs_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float [[A]], float [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| uint32_t test_vceqs_f32(float32_t a, float32_t b) { |
| return (uint32_t)vceqs_f32(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vceqd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oeq double [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vceqd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| uint64_t test_vceqd_f64(float64_t a, float64_t b) { |
| return (uint64_t)vceqd_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vceqzs_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oeq float [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCEQZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vceqzs_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float [[A]], float 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCEQZ_I]] |
| // |
| uint32_t test_vceqzs_f32(float32_t a) { |
| return (uint32_t)vceqzs_f32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vceqzd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oeq double [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCEQZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vceqzd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCEQZ_I]] |
| // |
| uint64_t test_vceqzd_f64(float64_t a) { |
| return (uint64_t)vceqzd_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vcges_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oge float [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vcges_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float [[B]], metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| uint32_t test_vcges_f32(float32_t a, float32_t b) { |
| return (uint32_t)vcges_f32(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcged_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oge double [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcged_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| uint64_t test_vcged_f64(float64_t a, float64_t b) { |
| return (uint64_t)vcged_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vcgezs_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oge float [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCGEZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vcgezs_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float 0.000000e+00, metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCGEZ_I]] |
| // |
| uint32_t test_vcgezs_f32(float32_t a) { |
| return (uint32_t)vcgezs_f32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcgezd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp oge double [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCGEZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcgezd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double 0.000000e+00, metadata !"oge", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCGEZ_I]] |
| // |
| uint64_t test_vcgezd_f64(float64_t a) { |
| return (uint64_t)vcgezd_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vcgts_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ogt float [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vcgts_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float [[B]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| uint32_t test_vcgts_f32(float32_t a, float32_t b) { |
| return (uint32_t)vcgts_f32(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcgtd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ogt double [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcgtd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| uint64_t test_vcgtd_f64(float64_t a, float64_t b) { |
| return (uint64_t)vcgtd_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vcgtzs_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ogt float [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCGTZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vcgtzs_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float 0.000000e+00, metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCGTZ_I]] |
| // |
| uint32_t test_vcgtzs_f32(float32_t a) { |
| return (uint32_t)vcgtzs_f32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcgtzd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ogt double [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCGTZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcgtzd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double 0.000000e+00, metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCGTZ_I]] |
| // |
| uint64_t test_vcgtzd_f64(float64_t a) { |
| return (uint64_t)vcgtzd_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vcles_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ole float [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vcles_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float [[B]], metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| uint32_t test_vcles_f32(float32_t a, float32_t b) { |
| return (uint32_t)vcles_f32(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcled_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ole double [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcled_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| uint64_t test_vcled_f64(float64_t a, float64_t b) { |
| return (uint64_t)vcled_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vclezs_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ole float [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCLEZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vclezs_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float 0.000000e+00, metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCLEZ_I]] |
| // |
| uint32_t test_vclezs_f32(float32_t a) { |
| return (uint32_t)vclezs_f32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vclezd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp ole double [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCLEZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vclezd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double 0.000000e+00, metadata !"ole", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCLEZ_I]] |
| // |
| uint64_t test_vclezd_f64(float64_t a) { |
| return (uint64_t)vclezd_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vclts_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp olt float [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vclts_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]], float noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float [[B]], metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCMPD_I]] |
| // |
| uint32_t test_vclts_f32(float32_t a, float32_t b) { |
| return (uint32_t)vclts_f32(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcltd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp olt double [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcltd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]], double noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCMPD_I]] |
| // |
| uint64_t test_vcltd_f64(float64_t a, float64_t b) { |
| return (uint64_t)vcltd_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i32 @test_vcltzs_f32( |
| // UNCONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp olt float [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // UNCONSTRAINED-NEXT: ret i32 [[VCLTZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i32 @test_vcltzs_f32( |
| // CONSTRAINED-SAME: float noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float [[A]], float 0.000000e+00, metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i32 |
| // CONSTRAINED-NEXT: ret i32 [[VCLTZ_I]] |
| // |
| uint32_t test_vcltzs_f32(float32_t a) { |
| return (uint32_t)vcltzs_f32(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local i64 @test_vcltzd_f64( |
| // UNCONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = fcmp olt double [[A]], 0.000000e+00 |
| // UNCONSTRAINED-NEXT: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // UNCONSTRAINED-NEXT: ret i64 [[VCLTZ_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local i64 @test_vcltzd_f64( |
| // CONSTRAINED-SAME: double noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double 0.000000e+00, metadata !"olt", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i64 |
| // CONSTRAINED-NEXT: ret i64 [[VCLTZ_I]] |
| // |
| uint64_t test_vcltzd_f64(float64_t a) { |
| return (uint64_t)vcltzd_f64(a); |
| } |
| |
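| // Illustrative sketch, not part of the autogenerated checks: the scalar |
| // equality tests above (vceqs/vceqd and their zero-compare forms) lower to |
| // the quiet compare intrinsic llvm.experimental.constrained.fcmp, while the |
| // ordered relational tests (vcge*, vcgt*, vcle*, vclt*) lower to the |
| // signaling variant llvm.experimental.constrained.fcmps, which raises the |
| // invalid exception for any NaN operand. The helper below is hypothetical |
| // and unused, so it adds no IR to the checked output. |
| static inline uint32_t example_in_closed_range(float32_t x, float32_t lo, |
|                                                float32_t hi) { |
|   // Both bounds checks are signaling compares; in strict mode a NaN input |
|   // raises the invalid exception rather than silently comparing false. |
|   return (uint32_t)(vcges_f32(x, lo) & vcles_f32(x, hi)); |
| } |
| |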
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vadd_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vadd_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[ADD_I]] |
| // |
| float64x1_t test_vadd_f64(float64x1_t a, float64x1_t b) { |
| return vadd_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vmul_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[MUL_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vmul_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[MUL_I]] |
| // |
| float64x1_t test_vmul_f64(float64x1_t a, float64x1_t b) { |
| return vmul_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vdiv_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[DIV_I:%.*]] = fdiv <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[DIV_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vdiv_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[DIV_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[DIV_I]] |
| // |
| float64x1_t test_vdiv_f64(float64x1_t a, float64x1_t b) { |
| return vdiv_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vmla_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <1 x double> [[B]], [[C]] |
| // UNCONSTRAINED-NEXT: [[ADD_I:%.*]] = fadd <1 x double> [[A]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[ADD_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vmla_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> [[B]], <1 x double> [[C]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[ADD_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> [[A]], <1 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[ADD_I]] |
| // |
| float64x1_t test_vmla_f64(float64x1_t a, float64x1_t b, float64x1_t c) { |
| return vmla_f64(a, b, c); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vmls_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[MUL_I:%.*]] = fmul <1 x double> [[B]], [[C]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <1 x double> [[A]], [[MUL_I]] |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vmls_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> [[B]], <1 x double> [[C]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> [[A]], <1 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[SUB_I]] |
| // |
| float64x1_t test_vmls_f64(float64x1_t a, float64x1_t b, float64x1_t c) { |
| return vmls_f64(a, b, c); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vfma_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[B]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P1_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP1]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <1 x double> [[C]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P2_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP2]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[__P1_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[__P2_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[TMP7]], <1 x double> [[TMP8]], <1 x double> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vfma_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[B]] to i64 |
| // CONSTRAINED-NEXT: [[__P1_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP1]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <1 x double> [[C]] to i64 |
| // CONSTRAINED-NEXT: [[__P2_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP2]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[__P1_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[__P2_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x double> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> [[TMP7]], <1 x double> [[TMP8]], <1 x double> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[TMP9]] |
| // |
| float64x1_t test_vfma_f64(float64x1_t a, float64x1_t b, float64x1_t c) { |
| return vfma_f64(a, b, c); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vfms_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <1 x double> [[B]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[FNEG_I]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P1_ADDR_I_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP1]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <1 x double> [[C]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P2_ADDR_I_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP2]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[__P1_ADDR_I_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[__P2_ADDR_I_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[TMP9:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[TMP7]], <1 x double> [[TMP8]], <1 x double> [[TMP6]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[TMP9]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vfms_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]], <1 x double> noundef [[C:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[FNEG_I:%.*]] = fneg <1 x double> [[B]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[FNEG_I]] to i64 |
| // CONSTRAINED-NEXT: [[__P1_ADDR_I_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP1]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <1 x double> [[C]] to i64 |
| // CONSTRAINED-NEXT: [[__P2_ADDR_I_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP2]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[__P1_ADDR_I_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[__P2_ADDR_I_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double> |
| // CONSTRAINED-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x double> |
| // CONSTRAINED-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x double> |
| // CONSTRAINED-NEXT: [[TMP9:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> [[TMP7]], <1 x double> [[TMP8]], <1 x double> [[TMP6]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[TMP9]] |
| // |
| float64x1_t test_vfms_f64(float64x1_t a, float64x1_t b, float64x1_t c) { |
| return vfms_f64(a, b, c); |
| } |
| |
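| // Illustrative sketch, not part of the autogenerated checks: vfms_f64 is not |
| // an fsub of a separately rounded product; the checks above show it negates |
| // the second argument and reuses llvm.experimental.constrained.fma.v1f64, so |
| // a - b*c is still computed with a single rounding. The helper below is |
| // hypothetical and unused, so it adds no IR to the checked output. |
| static inline float64x1_t example_lerp_f64(float64x1_t a, float64x1_t b, |
|                                            float64x1_t t) { |
|   // a + t*(b - a), built from one fused multiply-subtract (a - t*a) and one |
|   // fused multiply-add; each step rounds exactly once. |
|   return vfma_f64(vfms_f64(a, t, a), t, b); |
| } |
| |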
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vsub_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[SUB_I:%.*]] = fsub <1 x double> [[A]], [[B]] |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[SUB_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vsub_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[SUB_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> [[A]], <1 x double> [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[SUB_I]] |
| // |
| float64x1_t test_vsub_f64(float64x1_t a, float64x1_t b) { |
| return vsub_f64(a, b); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcvt_s64_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VCVTZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VCVTZ1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> [[VCVTZ_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[VCVTZ1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcvt_s64_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VCVTZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VCVTZ1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> [[VCVTZ_I]]) #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x i64> [[VCVTZ1_I]] |
| // |
| int64x1_t test_vcvt_s64_f64(float64x1_t a) { |
| return vcvt_s64_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcvt_u64_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VCVTZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VCVTZ1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> [[VCVTZ_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x i64> [[VCVTZ1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x i64> @test_vcvt_u64_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VCVTZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VCVTZ1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> [[VCVTZ_I]]) #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x i64> [[VCVTZ1_I]] |
| // |
| uint64x1_t test_vcvt_u64_f64(float64x1_t a) { |
| return vcvt_u64_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vcvt_f64_s64( |
| // UNCONSTRAINED-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: [[VCVT_I:%.*]] = sitofp <1 x i64> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VCVT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vcvt_f64_s64( |
| // CONSTRAINED-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> |
| // CONSTRAINED-NEXT: [[VCVT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> [[TMP1]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VCVT_I]] |
| // |
| float64x1_t test_vcvt_f64_s64(int64x1_t a) { |
| return vcvt_f64_s64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vcvt_f64_u64( |
| // UNCONSTRAINED-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> |
| // UNCONSTRAINED-NEXT: [[VCVT_I:%.*]] = uitofp <1 x i64> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VCVT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vcvt_f64_u64( |
| // CONSTRAINED-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> |
| // CONSTRAINED-NEXT: [[VCVT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> [[TMP1]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VCVT_I]] |
| // |
| float64x1_t test_vcvt_f64_u64(uint64x1_t a) { |
| return vcvt_f64_u64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vrnda_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VRNDA_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VRNDA1_I:%.*]] = call <1 x double> @llvm.round.v1f64(<1 x double> [[VRNDA_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VRNDA1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vrnda_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VRNDA_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VRNDA1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> [[VRNDA_I]], metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VRNDA1_I]] |
| // |
| float64x1_t test_vrnda_f64(float64x1_t a) { |
| return vrnda_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndp_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VRNDP_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VRNDP1_I:%.*]] = call <1 x double> @llvm.ceil.v1f64(<1 x double> [[VRNDP_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VRNDP1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndp_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VRNDP_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VRNDP1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> [[VRNDP_I]], metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VRNDP1_I]] |
| // |
| float64x1_t test_vrndp_f64(float64x1_t a) { |
| return vrndp_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndm_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VRNDM_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VRNDM1_I:%.*]] = call <1 x double> @llvm.floor.v1f64(<1 x double> [[VRNDM_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VRNDM1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndm_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VRNDM_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VRNDM1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> [[VRNDM_I]], metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VRNDM1_I]] |
| // |
| float64x1_t test_vrndm_f64(float64x1_t a) { |
| return vrndm_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndx_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VRNDX_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VRNDX1_I:%.*]] = call <1 x double> @llvm.rint.v1f64(<1 x double> [[VRNDX_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VRNDX1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndx_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VRNDX_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VRNDX1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> [[VRNDX_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VRNDX1_I]] |
| // |
| float64x1_t test_vrndx_f64(float64x1_t a) { |
| return vrndx_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vrnd_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VRNDZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> [[VRNDZ_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VRNDZ1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vrnd_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VRNDZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> [[VRNDZ_I]], metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VRNDZ1_I]] |
| // |
| float64x1_t test_vrnd_f64(float64x1_t a) { |
| return vrnd_f64(a); |
| } |
| |
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndi_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[VRNDI_V_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VRNDI_V1_I:%.*]] = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> [[VRNDI_V_I]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VRNDI_V1_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vrndi_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[VRNDI_V_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VRNDI_V1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> [[VRNDI_V_I]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VRNDI_V1_I]] |
| // |
| float64x1_t test_vrndi_f64(float64x1_t a) { |
| return vrndi_f64(a); |
| } |
| |
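| // Illustrative sketch, not part of the autogenerated checks: the rounding |
| // intrinsics with a fixed direction (vrnd -> trunc, vrndm -> floor, |
| // vrndp -> ceil, vrnda -> round) carry only the fpexcept.strict metadata in |
| // their constrained form, while vrndx -> rint and vrndi -> nearbyint also |
| // carry a rounding-mode argument because they honor the current mode. The |
| // helper below is hypothetical and unused, so it adds no IR to the checked |
| // output. |
| static inline float64x1_t example_fraction_f64(float64x1_t a) { |
|   // a - trunc(a); in strict mode the subtraction also lowers to a |
|   // constrained fsub, so its ordering relative to the truncation is kept. |
|   return vsub_f64(a, vrnd_f64(a)); |
| } |
| |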
| // UNCONSTRAINED-LABEL: define dso_local <1 x double> @test_vsqrt_f64( |
| // UNCONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // UNCONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // UNCONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // UNCONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // UNCONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // UNCONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // UNCONSTRAINED-NEXT: [[VSQRT_I:%.*]] = call <1 x double> @llvm.sqrt.v1f64(<1 x double> [[TMP2]]) |
| // UNCONSTRAINED-NEXT: ret <1 x double> [[VSQRT_I]] |
| // |
| // CONSTRAINED-LABEL: define dso_local <1 x double> @test_vsqrt_f64( |
| // CONSTRAINED-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] { |
| // CONSTRAINED-NEXT: [[ENTRY:.*:]] |
| // CONSTRAINED-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64 |
| // CONSTRAINED-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0 |
| // CONSTRAINED-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8> |
| // CONSTRAINED-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> |
| // CONSTRAINED-NEXT: [[VSQRT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> [[TMP2]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR3]] |
| // CONSTRAINED-NEXT: ret <1 x double> [[VSQRT_I]] |
| // |
| float64x1_t test_vsqrt_f64(float64x1_t a) { |
| return vsqrt_f64(a); |
| } |