// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -triple arm64-none-linux-gnu -target-feature +v8.2a -target-feature +neon -target-feature +fp16fml \
// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// REQUIRES: aarch64-registered-target
// Test AArch64 Armv8.2-A FP16 Fused Multiply-Add Long intrinsics
#include <arm_neon.h>
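// Informal sketch of the intended semantics (see the Arm ACLE for the
// authoritative definitions): the FMLAL/FMLSL intrinsics widen f16 products
// to f32 and accumulate. For the 64-bit "low" form, roughly
//   vfmlal_low_f16(r, a, b)[i] ~ r[i] + (float)a[i] * (float)b[i], i = 0..1
// while the "high" form reads a[i+2] and b[i+2], and FMLSL subtracts the
// product instead of adding it.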
// Vector form
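// These map directly onto the llvm.aarch64.neon.fmlal/fmlsl intrinsics: the
// "low" tests below should lower to fmlal/fmlsl and the "high" tests to the
// fmlal2/fmlsl2 (upper-half) variants.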
// CHECK-LABEL: @test_vfmlal_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
// CHECK-NEXT: [[VFMLAL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]])
// CHECK-NEXT: ret <2 x float> [[VFMLAL_LOW3_I]]
//
float32x2_t test_vfmlal_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlal_low_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlsl_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
// CHECK-NEXT: [[VFMLSL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]])
// CHECK-NEXT: ret <2 x float> [[VFMLSL_LOW3_I]]
//
float32x2_t test_vfmlsl_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlsl_low_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlal_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
// CHECK-NEXT: [[VFMLAL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]])
// CHECK-NEXT: ret <2 x float> [[VFMLAL_HIGH3_I]]
//
float32x2_t test_vfmlal_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlal_high_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlsl_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
// CHECK-NEXT: [[VFMLSL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]])
// CHECK-NEXT: ret <2 x float> [[VFMLSL_HIGH3_I]]
//
float32x2_t test_vfmlsl_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlsl_high_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlalq_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
// CHECK-NEXT: [[VFMLAL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]])
// CHECK-NEXT: ret <4 x float> [[VFMLAL_LOW3_I]]
//
float32x4_t test_vfmlalq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlalq_low_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlslq_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
// CHECK-NEXT: [[VFMLSL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]])
// CHECK-NEXT: ret <4 x float> [[VFMLSL_LOW3_I]]
//
float32x4_t test_vfmlslq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlslq_low_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlalq_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
// CHECK-NEXT: [[VFMLAL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]])
// CHECK-NEXT: ret <4 x float> [[VFMLAL_HIGH3_I]]
//
float32x4_t test_vfmlalq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlalq_high_f16(a, b, c);
}
// CHECK-LABEL: @test_vfmlslq_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
// CHECK-NEXT: [[VFMLSL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]])
// CHECK-NEXT: ret <4 x float> [[VFMLSL_HIGH3_I]]
//
float32x4_t test_vfmlslq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlslq_high_f16(a, b, c);
}
// Indexed form
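// The lane forms splat c[LANE] into a fresh vector and then call the same
// vector-form intrinsics as above. The splat is built lane by lane through
// stack temporaries (the __reint allocas below) because the half <-> i16
// type pun goes through memory, which keeps mem2reg from promoting them.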
// CHECK-LABEL: @test_vfmlal_lane_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLAL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLAL_LOW3_I]]
//
float32x2_t test_vfmlal_lane_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlal_lane_low_f16(a, b, c, 0);
}
// CHECK-LABEL: @test_vfmlal_lane_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLAL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLAL_HIGH3_I]]
//
float32x2_t test_vfmlal_lane_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlal_lane_high_f16(a, b, c, 1);
}
// CHECK-LABEL: @test_vfmlalq_lane_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85134:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85135:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85144:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85145:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85154:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85155:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85164:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85165:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85134]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_85134]] to <4 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
// CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_85135]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85135]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85144]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_85144]] to <4 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
// CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_85145]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85145]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85154]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_85154]] to <4 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
// CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_85155]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85155]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85164]], align 8
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_85164]] to <4 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
// CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_85165]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85165]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLAL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLAL_LOW3_I]]
//
float32x4_t test_vfmlalq_lane_low_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
return vfmlalq_lane_low_f16(a, b, c, 2);
}
// CHECK-LABEL: @test_vfmlalq_lane_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85134:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85135:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85144:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85145:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85154:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85155:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85164:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85165:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85134]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_85134]] to <4 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
// CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_85135]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85135]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85144]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_85144]] to <4 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
// CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_85145]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85145]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85154]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_85154]] to <4 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
// CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_85155]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85155]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85164]], align 8
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_85164]] to <4 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
// CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_85165]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85165]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLAL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLAL_HIGH3_I]]
//
float32x4_t test_vfmlalq_lane_high_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
return vfmlalq_lane_high_f16(a, b, c, 3);
}
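// laneq variants take the lane index from a 128-bit float16x8_t rather than
// a 64-bit float16x4_t, so lane indices 4-7 are exercised below.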
// CHECK-LABEL: @test_vfmlal_laneq_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLAL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLAL_LOW3_I]]
//
float32x2_t test_vfmlal_laneq_low_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
return vfmlal_laneq_low_f16(a, b, c, 4);
}
// CHECK-LABEL: @test_vfmlal_laneq_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLAL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLAL_HIGH3_I]]
//
float32x2_t test_vfmlal_laneq_high_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
return vfmlal_laneq_high_f16(a, b, c, 5);
}
// CHECK-LABEL: @test_vfmlalq_laneq_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85434:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85435:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85444:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85445:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85454:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85455:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85464:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85465:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85434]], align 16
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_85434]] to <8 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
// CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_85435]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85435]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85444]], align 16
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_85444]] to <8 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
// CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_85445]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85445]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85454]], align 16
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_85454]] to <8 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
// CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_85455]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85455]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85464]], align 16
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_85464]] to <8 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
// CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_85465]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85465]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLAL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLAL_LOW3_I]]
//
float32x4_t test_vfmlalq_laneq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlalq_laneq_low_f16(a, b, c, 6);
}
// CHECK-LABEL: @test_vfmlalq_laneq_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85434:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85435:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85444:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85445:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85454:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85455:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85464:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85465:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85434]], align 16
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_85434]] to <8 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
// CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_85435]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85435]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85444]], align 16
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_85444]] to <8 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
// CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_85445]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85445]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85454]], align 16
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_85454]] to <8 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
// CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_85455]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85455]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85464]], align 16
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_85464]] to <8 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
// CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_85465]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85465]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLAL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLAL_HIGH3_I]]
//
float32x4_t test_vfmlalq_laneq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlalq_laneq_high_f16(a, b, c, 7);
}
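// FMLSL indexed forms: the same splat-then-call pattern as the FMLAL tests
// above, but lowering to the fmlsl/fmlsl2 intrinsics.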
// CHECK-LABEL: @test_vfmlsl_lane_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 0
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLSL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLSL_LOW3_I]]
//
float32x2_t test_vfmlsl_lane_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlsl_lane_low_f16(a, b, c, 0);
}
// CHECK-LABEL: @test_vfmlsl_lane_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 1
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLSL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLSL_HIGH3_I]]
//
float32x2_t test_vfmlsl_lane_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
return vfmlsl_lane_high_f16(a, b, c, 1);
}
// CHECK-LABEL: @test_vfmlslq_lane_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85134:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85135:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85144:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85145:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85154:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85155:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85164:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85165:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85134]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_85134]] to <4 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
// CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_85135]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85135]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85144]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_85144]] to <4 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
// CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_85145]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85145]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85154]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_85154]] to <4 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
// CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_85155]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85155]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85164]], align 8
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_85164]] to <4 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
// CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 2
// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_85165]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85165]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLSL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLSL_LOW3_I]]
//
float32x4_t test_vfmlslq_lane_low_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
return vfmlslq_lane_low_f16(a, b, c, 2);
}
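// Illustrative only, not checked above: a minimal scalar model of what
// vfmlslq_lane_low_f16(a, b, c, lane) computes, using clang's vector element
// subscripting and ignoring that the hardware instruction fuses the multiply
// and subtract into a single rounding. The helper name fmlslq_lane_low_ref is
// hypothetical, not part of arm_neon.h, and unlike the intrinsic it takes a
// runtime lane index (the real intrinsic requires a constant in 0..3).
static inline float32x4_t fmlslq_lane_low_ref(float32x4_t a, float16x8_t b,
                                              float16x4_t c, int lane) {
  float32x4_t r;
  for (int i = 0; i != 4; ++i)          // low half: elements b[0..3]
    r[i] = a[i] - (float)b[i] * (float)c[lane];
  return r;
}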
// CHECK-LABEL: @test_vfmlslq_lane_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_851:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_851:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8514:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_8515:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85114:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85115:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85124:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85125:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85134:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85135:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85144:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85145:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85154:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85155:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85164:%.*]] = alloca <4 x half>, align 8
// CHECK-NEXT: [[__REINT1_85165:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_851]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_851]] to <4 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_851]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_851]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_8514]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_8514]] to <4 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_8515]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8515]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85114]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_85114]] to <4 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_85115]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85115]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85124]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_85124]] to <4 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_85125]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85125]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85134]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_85134]] to <4 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
// CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_85135]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85135]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85144]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_85144]] to <4 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
// CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_85145]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85145]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85154]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_85154]] to <4 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
// CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_85155]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85155]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_85164]], align 8
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_85164]] to <4 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
// CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 3
// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_85165]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85165]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLSL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLSL_HIGH3_I]]
//
float32x4_t test_vfmlslq_lane_high_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
return vfmlslq_lane_high_f16(a, b, c, 3);
}
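// In the _high variants above, the checked IR differs from the _low form only
// in the intrinsic called: llvm.aarch64.neon.fmlsl2, which consumes the top
// four halves b[4..7]. The laneq tests that follow take the lane operand from
// a 128-bit float16x8_t instead, so the lane index ranges over 0..7 and the
// splat is built from vgetq_lane (VGETQ_LANE in the checks) rather than
// vget_lane.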
// CHECK-LABEL: @test_vfmlsl_laneq_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 4
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLSL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLSL_LOW3_I]]
//
float32x2_t test_vfmlsl_laneq_low_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
return vfmlsl_laneq_low_f16(a, b, c, 4);
}
// CHECK-LABEL: @test_vfmlsl_laneq_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 5
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
// CHECK-NEXT: [[VFMLSL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]])
// CHECK-NEXT: ret <2 x float> [[VFMLSL_HIGH3_I]]
//
float32x2_t test_vfmlsl_laneq_high_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
return vfmlsl_laneq_high_f16(a, b, c, 5);
}
// CHECK-LABEL: @test_vfmlslq_laneq_low_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85434:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85435:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85444:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85445:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85454:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85455:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85464:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85465:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85434]], align 16
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_85434]] to <8 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
// CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_85435]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85435]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85444]], align 16
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_85444]] to <8 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
// CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_85445]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85445]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85454]], align 16
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_85454]] to <8 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
// CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_85455]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85455]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85464]], align 16
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_85464]] to <8 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
// CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 6
// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_85465]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85465]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLSL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLSL_LOW3_I]]
//
float32x4_t test_vfmlslq_laneq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlslq_laneq_low_f16(a, b, c, 6);
}
// CHECK-LABEL: @test_vfmlslq_laneq_high_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_854:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_854:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_8544:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_8545:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85414:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85415:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85424:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85425:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85434:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85435:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85444:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85445:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85454:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85455:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[__REINT_85464:%.*]] = alloca <8 x half>, align 16
// CHECK-NEXT: [[__REINT1_85465:%.*]] = alloca i16, align 2
// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_854]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_854]] to <8 x i16>*
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_854]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_854]] to half*
// CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_8544]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_8544]] to <8 x i16>*
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
// CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_8545]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_8545]] to half*
// CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
// CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85414]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_85414]] to <8 x i16>*
// CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
// CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_85415]], align 2
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_85415]] to half*
// CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
// CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85424]], align 16
// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_85424]] to <8 x i16>*
// CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
// CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_85425]], align 2
// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_85425]] to half*
// CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
// CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85434]], align 16
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_85434]] to <8 x i16>*
// CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
// CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_85435]], align 2
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_85435]] to half*
// CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
// CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85444]], align 16
// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_85444]] to <8 x i16>*
// CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
// CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_85445]], align 2
// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_85445]] to half*
// CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
// CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85454]], align 16
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_85454]] to <8 x i16>*
// CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
// CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_85455]], align 2
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_85455]] to half*
// CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
// CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_85464]], align 16
// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_85464]] to <8 x i16>*
// CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
// CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 7
// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_85465]], align 2
// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_85465]] to half*
// CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
// CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
// CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
// CHECK-NEXT: [[VFMLSL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]])
// CHECK-NEXT: ret <4 x float> [[VFMLSL_HIGH3_I]]
//
float32x4_t test_vfmlslq_laneq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
return vfmlslq_laneq_high_f16(a, b, c, 7);
}
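// A minimal usage sketch, not exercised by the checks above: guarding a call
// on the ACLE feature macro __ARM_FEATURE_FP16_FML, which clang predefines
// when +fp16fml is enabled. The helper name scale_accumulate_hi is ours, not
// part of arm_neon.h.
#ifdef __ARM_FEATURE_FP16_FML
static inline float32x4_t scale_accumulate_hi(float32x4_t acc, float16x8_t x,
                                              float16x8_t w) {
  // acc[i] += x[4+i] * w[7] for i in 0..3, via the high/laneq form.
  return vfmlalq_laneq_high_f16(acc, x, w, 7);
}
#endif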