| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py |
| // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \ |
| // RUN: -disable-O0-optnone \ |
| // RUN: -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s |
| |
| // REQUIRES: aarch64-registered-target |
| |
| #include <arm_sve.h> |
| |
| // ADDITION |
| |
// Vector+vector, in-place, vector+scalar, and vector+literal additions.
// Each case must lower to a single (f)add after SROA: scalar operands are
// splatted via insertelement+shufflevector, and zero literals fold to
// zeroinitializer.
// CHECK-LABEL: @add_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8(svint8_t a, svint8_t b) {
return a + b;
}

// CHECK-LABEL: @add_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
//
svint16_t add_i16(svint16_t a, svint16_t b) {
return a + b;
}

// CHECK-LABEL: @add_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
//
svint32_t add_i32(svint32_t a, svint32_t b) {
return a + b;
}

// CHECK-LABEL: @add_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
//
svint64_t add_i64(svint64_t a, svint64_t b) {
return a + b;
}

// CHECK-LABEL: @add_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svuint8_t add_u8(svuint8_t a, svuint8_t b) {
return a + b;
}

// CHECK-LABEL: @add_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
//
svuint16_t add_u16(svuint16_t a, svuint16_t b) {
return a + b;
}

// CHECK-LABEL: @add_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
//
svuint32_t add_u32(svuint32_t a, svuint32_t b) {
return a + b;
}

// CHECK-LABEL: @add_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
//
svuint64_t add_u64(svuint64_t a, svuint64_t b) {
return a + b;
}

// CHECK-LABEL: @add_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x half> [[ADD]]
//
svfloat16_t add_f16(svfloat16_t a, svfloat16_t b) {
return a + b;
}

// CHECK-LABEL: @add_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
//
svfloat32_t add_f32(svfloat32_t a, svfloat32_t b) {
return a + b;
}

// CHECK-LABEL: @add_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64(svfloat64_t a, svfloat64_t b) {
return a + b;
}

// In-place (compound assignment) forms: after SROA the IR is identical to
// the plain binary-operator forms above.
// CHECK-LABEL: @add_inplace_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_inplace_i8(svint8_t a, svint8_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
//
svint16_t add_inplace_i16(svint16_t a, svint16_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
//
svint32_t add_inplace_i32(svint32_t a, svint32_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
//
svint64_t add_inplace_i64(svint64_t a, svint64_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svuint8_t add_inplace_u8(svuint8_t a, svuint8_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
//
svuint16_t add_inplace_u16(svuint16_t a, svuint16_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
//
svuint32_t add_inplace_u32(svuint32_t a, svuint32_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
//
svuint64_t add_inplace_u64(svuint64_t a, svuint64_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x half> [[ADD]]
//
svfloat16_t add_inplace_f16(svfloat16_t a, svfloat16_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
//
svfloat32_t add_inplace_f32(svfloat32_t a, svfloat32_t b) {
return a += b;
}

// CHECK-LABEL: @add_inplace_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_inplace_f64(svfloat64_t a, svfloat64_t b) {
return a += b;
}

// Vector + scalar: the scalar operand must be splatted with an
// insertelement at index 0 followed by a zero-mask shufflevector.
// CHECK-LABEL: @add_scalar_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_scalar_i8(svint8_t a, int8_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
//
svint16_t add_scalar_i16(svint16_t a, int16_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
//
svint32_t add_scalar_i32(svint32_t a, int32_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
//
svint64_t add_scalar_i64(svint64_t a, int64_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svuint8_t add_scalar_u8(svuint8_t a, uint8_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
//
svuint16_t add_scalar_u16(svuint16_t a, uint16_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
//
svuint32_t add_scalar_u32(svuint32_t a, uint32_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
//
svuint64_t add_scalar_u64(svuint64_t a, uint64_t b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x half> [[ADD]]
//
svfloat16_t add_scalar_f16(svfloat16_t a, __fp16 b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
//
svfloat32_t add_scalar_f32(svfloat32_t a, float b) {
return a + b;
}

// CHECK-LABEL: @add_scalar_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_scalar_f64(svfloat64_t a, double b) {
return a + b;
}

// Vector + zero literal of each integer/float literal suffix: the splat of
// zero must constant-fold to zeroinitializer.
// CHECK-LABEL: @add_i8_i_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8_i_lit(svint8_t a) {
return a + 0;
}

// CHECK-LABEL: @add_i8_il_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8_il_lit(svint8_t a) {
return a + 0l;
}

// CHECK-LABEL: @add_i8_ill_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8_ill_lit(svint8_t a) {
return a + 0ll;
}

// CHECK-LABEL: @add_i8_u_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8_u_lit(svint8_t a) {
return a + 0u;
}

// CHECK-LABEL: @add_i8_ul_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8_ul_lit(svint8_t a) {
return a + 0ul;
}

// CHECK-LABEL: @add_i8_ull_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
//
svint8_t add_i8_ull_lit(svint8_t a) {
return a + 0ull;
}

// CHECK-LABEL: @add_f64_i_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_i_lit(svfloat64_t a) {
return a + 0;
}

// CHECK-LABEL: @add_f64_il_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_il_lit(svfloat64_t a) {
return a + 0l;
}

// CHECK-LABEL: @add_f64_ill_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_ill_lit(svfloat64_t a) {
return a + 0ll;
}

// CHECK-LABEL: @add_f64_u_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_u_lit(svfloat64_t a) {
return a + 0u;
}

// CHECK-LABEL: @add_f64_ul_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_ul_lit(svfloat64_t a) {
return a + 0ul;
}

// CHECK-LABEL: @add_f64_ull_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_ull_lit(svfloat64_t a) {
return a + 0ull;
}

// CHECK-LABEL: @add_f64_f_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_f_lit(svfloat64_t a) {
return a + 0.f;
}

// CHECK-LABEL: @add_f64_d_lit(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
//
svfloat64_t add_f64_d_lit(svfloat64_t a) {
return a + 0.;
}
| |
| // SUBTRACTION |
| |
// Vector-vector subtraction: each case must lower to a single (f)sub after
// SROA.
// CHECK-LABEL: @sub_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svint8_t sub_i8(svint8_t a, svint8_t b) {
return a - b;
}

// CHECK-LABEL: @sub_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svint16_t sub_i16(svint16_t a, svint16_t b) {
return a - b;
}

// CHECK-LABEL: @sub_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svint32_t sub_i32(svint32_t a, svint32_t b) {
return a - b;
}

// CHECK-LABEL: @sub_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svint64_t sub_i64(svint64_t a, svint64_t b) {
return a - b;
}

// CHECK-LABEL: @sub_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svuint8_t sub_u8(svuint8_t a, svuint8_t b) {
return a - b;
}

// CHECK-LABEL: @sub_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svuint16_t sub_u16(svuint16_t a, svuint16_t b) {
return a - b;
}

// CHECK-LABEL: @sub_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svuint32_t sub_u32(svuint32_t a, svuint32_t b) {
return a - b;
}

// CHECK-LABEL: @sub_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svuint64_t sub_u64(svuint64_t a, svuint64_t b) {
return a - b;
}

// CHECK-LABEL: @sub_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x half> [[SUB]]
//
svfloat16_t sub_f16(svfloat16_t a, svfloat16_t b) {
return a - b;
}

// CHECK-LABEL: @sub_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x float> [[SUB]]
//
svfloat32_t sub_f32(svfloat32_t a, svfloat32_t b) {
return a - b;
}

// CHECK-LABEL: @sub_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x double> [[SUB]]
//
svfloat64_t sub_f64(svfloat64_t a, svfloat64_t b) {
return a - b;
}
| |
// In-place form: exercise compound assignment (-=), matching the function
// name and the add_inplace_* pattern; post-SROA IR (and CHECK lines) are
// identical to plain subtraction.
// CHECK-LABEL: @sub_inplace_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svint8_t sub_inplace_i8(svint8_t a, svint8_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svint16_t sub_inplace_i16(svint16_t a, svint16_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svint32_t sub_inplace_i32(svint32_t a, svint32_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svint64_t sub_inplace_i64(svint64_t a, svint64_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svuint8_t sub_inplace_u8(svuint8_t a, svuint8_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svuint16_t sub_inplace_u16(svuint16_t a, svuint16_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svuint32_t sub_inplace_u32(svuint32_t a, svuint32_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svuint64_t sub_inplace_u64(svuint64_t a, svuint64_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x half> [[SUB]]
//
svfloat16_t sub_inplace_f16(svfloat16_t a, svfloat16_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x float> [[SUB]]
//
svfloat32_t sub_inplace_f32(svfloat32_t a, svfloat32_t b) {
return a -= b;
}
| |
// In-place form: use -= so the compound-assignment path is actually tested.
// CHECK-LABEL: @sub_inplace_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x double> [[SUB]]
//
svfloat64_t sub_inplace_f64(svfloat64_t a, svfloat64_t b) {
return a -= b;
}
| |
// Vector - scalar: the scalar operand must be splatted (insertelement at
// index 0 + zero-mask shufflevector) before a single (f)sub.
// CHECK-LABEL: @sub_scalar_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svint8_t sub_scalar_i8(svint8_t a, int8_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svint16_t sub_scalar_i16(svint16_t a, int16_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svint32_t sub_scalar_i32(svint32_t a, int32_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svint64_t sub_scalar_i64(svint64_t a, int64_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svuint8_t sub_scalar_u8(svuint8_t a, uint8_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svuint16_t sub_scalar_u16(svuint16_t a, uint16_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svuint32_t sub_scalar_u32(svuint32_t a, uint32_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svuint64_t sub_scalar_u64(svuint64_t a, uint64_t b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x half> [[SUB]]
//
svfloat16_t sub_scalar_f16(svfloat16_t a, __fp16 b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x float> [[SUB]]
//
svfloat32_t sub_scalar_f32(svfloat32_t a, float b) {
return a - b;
}

// CHECK-LABEL: @sub_scalar_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x double> [[SUB]]
//
svfloat64_t sub_scalar_f64(svfloat64_t a, double b) {
return a - b;
}
| |
| // MULTIPLICATION |
| |
// Vector-vector multiplication: each case must lower to a single (f)mul
// after SROA.
// CHECK-LABEL: @mul_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
//
svint8_t mul_i8(svint8_t a, svint8_t b) {
return a * b;
}

// CHECK-LABEL: @mul_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
//
svint16_t mul_i16(svint16_t a, svint16_t b) {
return a * b;
}

// CHECK-LABEL: @mul_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
//
svint32_t mul_i32(svint32_t a, svint32_t b) {
return a * b;
}

// CHECK-LABEL: @mul_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
//
svint64_t mul_i64(svint64_t a, svint64_t b) {
return a * b;
}

// CHECK-LABEL: @mul_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
//
svuint8_t mul_u8(svuint8_t a, svuint8_t b) {
return a * b;
}

// CHECK-LABEL: @mul_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
//
svuint16_t mul_u16(svuint16_t a, svuint16_t b) {
return a * b;
}

// CHECK-LABEL: @mul_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
//
svuint32_t mul_u32(svuint32_t a, svuint32_t b) {
return a * b;
}

// CHECK-LABEL: @mul_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
//
svuint64_t mul_u64(svuint64_t a, svuint64_t b) {
return a * b;
}

// CHECK-LABEL: @mul_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x half> [[MUL]]
//
svfloat16_t mul_f16(svfloat16_t a, svfloat16_t b) {
return a * b;
}

// CHECK-LABEL: @mul_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x float> [[MUL]]
//
svfloat32_t mul_f32(svfloat32_t a, svfloat32_t b) {
return a * b;
}

// CHECK-LABEL: @mul_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x double> [[MUL]]
//
svfloat64_t mul_f64(svfloat64_t a, svfloat64_t b) {
return a * b;
}
| |
// In-place form: exercise compound assignment (*=), matching the function
// name and the add_inplace_* pattern; post-SROA IR (and CHECK lines) are
// identical to plain multiplication.
// CHECK-LABEL: @mul_inplace_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
//
svint8_t mul_inplace_i8(svint8_t a, svint8_t b) {
return a *= b;
}
| |
| // CHECK-LABEL: @mul_inplace_i16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]] |
| // |
| svint16_t mul_inplace_i16(svint16_t a, svint16_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_i32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]] |
| // |
| svint32_t mul_inplace_i32(svint32_t a, svint32_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_i64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]] |
| // |
| svint64_t mul_inplace_i64(svint64_t a, svint64_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_u8( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]] |
| // |
| svuint8_t mul_inplace_u8(svuint8_t a, svuint8_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_u16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]] |
| // |
| svuint16_t mul_inplace_u16(svuint16_t a, svuint16_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_u32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]] |
| // |
| svuint32_t mul_inplace_u32(svuint32_t a, svuint32_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_u64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]] |
| // |
| svuint64_t mul_inplace_u64(svuint64_t a, svuint64_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_f16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x half> [[MUL]] |
| // |
| svfloat16_t mul_inplace_f16(svfloat16_t a, svfloat16_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_f32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x float> [[MUL]] |
| // |
| svfloat32_t mul_inplace_f32(svfloat32_t a, svfloat32_t b) { |
| return a * b; |
| } |
| |
| // CHECK-LABEL: @mul_inplace_f64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x double> [[MUL]] |
| // |
| svfloat64_t mul_inplace_f64(svfloat64_t a, svfloat64_t b) { |
| return a * b; |
| } |
| |
// Vector-by-scalar multiplication: the scalar operand is splatted to a full
// vector (insertelement at lane 0 + zeroinitializer shufflevector) before
// the element-wise multiply.

// CHECK-LABEL: @mul_scalar_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
//
svint8_t mul_scalar_i8(svint8_t a, int8_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
//
svint16_t mul_scalar_i16(svint16_t a, int16_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
//
svint32_t mul_scalar_i32(svint32_t a, int32_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
//
svint64_t mul_scalar_i64(svint64_t a, int64_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
//
svuint8_t mul_scalar_u8(svuint8_t a, uint8_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
//
svuint16_t mul_scalar_u16(svuint16_t a, uint16_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
//
svuint32_t mul_scalar_u32(svuint32_t a, uint32_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
//
svuint64_t mul_scalar_u64(svuint64_t a, uint64_t b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x half> [[MUL]]
//
svfloat16_t mul_scalar_f16(svfloat16_t a, __fp16 b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x float> [[MUL]]
//
svfloat32_t mul_scalar_f32(svfloat32_t a, float b) {
return a * b;
}

// CHECK-LABEL: @mul_scalar_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x double> [[MUL]]
//
svfloat64_t mul_scalar_f64(svfloat64_t a, double b) {
return a * b;
}
| |
// DIVISION
// Element-wise vector / vector: sdiv for signed, udiv for unsigned, and
// fdiv for floating-point element types.

// CHECK-LABEL: @div_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
//
svint8_t div_i8(svint8_t a, svint8_t b) {
return a / b;
}

// CHECK-LABEL: @div_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
//
svint16_t div_i16(svint16_t a, svint16_t b) {
return a / b;
}

// CHECK-LABEL: @div_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
//
svint32_t div_i32(svint32_t a, svint32_t b) {
return a / b;
}

// CHECK-LABEL: @div_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
//
svint64_t div_i64(svint64_t a, svint64_t b) {
return a / b;
}

// CHECK-LABEL: @div_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
//
svuint8_t div_u8(svuint8_t a, svuint8_t b) {
return a / b;
}

// CHECK-LABEL: @div_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
//
svuint16_t div_u16(svuint16_t a, svuint16_t b) {
return a / b;
}

// CHECK-LABEL: @div_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
//
svuint32_t div_u32(svuint32_t a, svuint32_t b) {
return a / b;
}

// CHECK-LABEL: @div_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
//
svuint64_t div_u64(svuint64_t a, svuint64_t b) {
return a / b;
}

// CHECK-LABEL: @div_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x half> [[DIV]]
//
svfloat16_t div_f16(svfloat16_t a, svfloat16_t b) {
return a / b;
}

// CHECK-LABEL: @div_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x float> [[DIV]]
//
svfloat32_t div_f32(svfloat32_t a, svfloat32_t b) {
return a / b;
}

// CHECK-LABEL: @div_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x double> [[DIV]]
//
svfloat64_t div_f64(svfloat64_t a, svfloat64_t b) {
return a / b;
}
| |
| // CHECK-LABEL: @div_inplace_i8( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]] |
| // |
| svint8_t div_inplace_i8(svint8_t a, svint8_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_i16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]] |
| // |
| svint16_t div_inplace_i16(svint16_t a, svint16_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_i32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]] |
| // |
| svint32_t div_inplace_i32(svint32_t a, svint32_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_i64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]] |
| // |
| svint64_t div_inplace_i64(svint64_t a, svint64_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_u8( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]] |
| // |
| svuint8_t div_inplace_u8(svuint8_t a, svuint8_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_u16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]] |
| // |
| svuint16_t div_inplace_u16(svuint16_t a, svuint16_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_u32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]] |
| // |
| svuint32_t div_inplace_u32(svuint32_t a, svuint32_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_u64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]] |
| // |
| svuint64_t div_inplace_u64(svuint64_t a, svuint64_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_f16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x half> [[DIV]] |
| // |
| svfloat16_t div_inplace_f16(svfloat16_t a, svfloat16_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_f32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x float> [[DIV]] |
| // |
| svfloat32_t div_inplace_f32(svfloat32_t a, svfloat32_t b) { |
| return a / b; |
| } |
| |
| // CHECK-LABEL: @div_inplace_f64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x double> [[DIV]] |
| // |
| svfloat64_t div_inplace_f64(svfloat64_t a, svfloat64_t b) { |
| return a / b; |
| } |
| |
// Vector-by-scalar division: the scalar divisor is splatted to a full vector
// (insertelement at lane 0 + zeroinitializer shufflevector) before the
// element-wise sdiv/udiv/fdiv.

// CHECK-LABEL: @div_scalar_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
//
svint8_t div_scalar_i8(svint8_t a, int8_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
//
svint16_t div_scalar_i16(svint16_t a, int16_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
//
svint32_t div_scalar_i32(svint32_t a, int32_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
//
svint64_t div_scalar_i64(svint64_t a, int64_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
//
svuint8_t div_scalar_u8(svuint8_t a, uint8_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
//
svuint16_t div_scalar_u16(svuint16_t a, uint16_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
//
svuint32_t div_scalar_u32(svuint32_t a, uint32_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
//
svuint64_t div_scalar_u64(svuint64_t a, uint64_t b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x half> [[DIV]]
//
svfloat16_t div_scalar_f16(svfloat16_t a, __fp16 b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x float> [[DIV]]
//
svfloat32_t div_scalar_f32(svfloat32_t a, float b) {
return a / b;
}

// CHECK-LABEL: @div_scalar_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x double> [[DIV]]
//
svfloat64_t div_scalar_f64(svfloat64_t a, double b) {
return a / b;
}
| |
// REMAINDER
// Element-wise vector % vector: srem for signed and urem for unsigned
// element types (no floating-point % in C, so only integer variants).

// CHECK-LABEL: @rem_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
//
svint8_t rem_i8(svint8_t a, svint8_t b) {
return a % b;
}

// CHECK-LABEL: @rem_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
//
svint16_t rem_i16(svint16_t a, svint16_t b) {
return a % b;
}

// CHECK-LABEL: @rem_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
//
svint32_t rem_i32(svint32_t a, svint32_t b) {
return a % b;
}

// CHECK-LABEL: @rem_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
//
svint64_t rem_i64(svint64_t a, svint64_t b) {
return a % b;
}

// CHECK-LABEL: @rem_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
//
svuint8_t rem_u8(svuint8_t a, svuint8_t b) {
return a % b;
}

// CHECK-LABEL: @rem_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
//
svuint16_t rem_u16(svuint16_t a, svuint16_t b) {
return a % b;
}

// CHECK-LABEL: @rem_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
//
svuint32_t rem_u32(svuint32_t a, svuint32_t b) {
return a % b;
}

// CHECK-LABEL: @rem_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
//
svuint64_t rem_u64(svuint64_t a, svuint64_t b) {
return a % b;
}
| |
| // CHECK-LABEL: @rem_inplace_i8( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]] |
| // |
| svint8_t rem_inplace_i8(svint8_t a, svint8_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_i16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]] |
| // |
| svint16_t rem_inplace_i16(svint16_t a, svint16_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_i32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]] |
| // |
| svint32_t rem_inplace_i32(svint32_t a, svint32_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_i64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]] |
| // |
| svint64_t rem_inplace_i64(svint64_t a, svint64_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_u8( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]] |
| // |
| svuint8_t rem_inplace_u8(svuint8_t a, svuint8_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_u16( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]] |
| // |
| svuint16_t rem_inplace_u16(svuint16_t a, svuint16_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_u32( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]] |
| // |
| svuint32_t rem_inplace_u32(svuint32_t a, svuint32_t b) { |
| return a % b; |
| } |
| |
| // CHECK-LABEL: @rem_inplace_u64( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]] |
| // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]] |
| // |
| svuint64_t rem_inplace_u64(svuint64_t a, svuint64_t b) { |
| return a % b; |
| } |
| |
// Vector-by-scalar remainder: the scalar operand is splatted to a full
// vector (insertelement at lane 0 + zeroinitializer shufflevector) before
// the element-wise srem/urem.

// CHECK-LABEL: @rem_scalar_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
//
svint8_t rem_scalar_i8(svint8_t a, int8_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
//
svint16_t rem_scalar_i16(svint16_t a, int16_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
//
svint32_t rem_scalar_i32(svint32_t a, int32_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
//
svint64_t rem_scalar_i64(svint64_t a, int64_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
//
svuint8_t rem_scalar_u8(svuint8_t a, uint8_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
//
svuint16_t rem_scalar_u16(svuint16_t a, uint16_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
//
svuint32_t rem_scalar_u32(svuint32_t a, uint32_t b) {
return a % b;
}

// CHECK-LABEL: @rem_scalar_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
//
svuint64_t rem_scalar_u64(svuint64_t a, uint64_t b) {
return a % b;
}
| |
// UNARY PROMOTION
// Unary + on an SVE vector is a no-op: the operand is returned unchanged,
// so each function lowers to a bare ret of the argument.

// CHECK-LABEL: @prom_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 16 x i8> [[A:%.*]]
//
svint8_t prom_i8(svint8_t a) {
return +a;
}

// CHECK-LABEL: @prom_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 8 x i16> [[A:%.*]]
//
svint16_t prom_i16(svint16_t a) {
return +a;
}

// CHECK-LABEL: @prom_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 4 x i32> [[A:%.*]]
//
svint32_t prom_i32(svint32_t a) {
return +a;
}

// CHECK-LABEL: @prom_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 2 x i64> [[A:%.*]]
//
svint64_t prom_i64(svint64_t a) {
return +a;
}

// CHECK-LABEL: @prom_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 16 x i8> [[A:%.*]]
//
svuint8_t prom_u8(svuint8_t a) {
return +a;
}

// CHECK-LABEL: @prom_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 8 x i16> [[A:%.*]]
//
svuint16_t prom_u16(svuint16_t a) {
return +a;
}

// CHECK-LABEL: @prom_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 4 x i32> [[A:%.*]]
//
svuint32_t prom_u32(svuint32_t a) {
return +a;
}

// CHECK-LABEL: @prom_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 2 x i64> [[A:%.*]]
//
svuint64_t prom_u64(svuint64_t a) {
return +a;
}
| |
// UNARY NEGATION
// Integer unary - lowers to `sub zeroinitializer, a` (0 - a) for both
// signed and unsigned element types.

// CHECK-LABEL: @neg_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svint8_t neg_i8(svint8_t a) {
return -a;
}

// CHECK-LABEL: @neg_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svint16_t neg_i16(svint16_t a) {
return -a;
}

// CHECK-LABEL: @neg_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svint32_t neg_i32(svint32_t a) {
return -a;
}

// CHECK-LABEL: @neg_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svint64_t neg_i64(svint64_t a) {
return -a;
}

// CHECK-LABEL: @neg_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
//
svuint8_t neg_u8(svuint8_t a) {
return -a;
}

// CHECK-LABEL: @neg_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
//
svuint16_t neg_u16(svuint16_t a) {
return -a;
}

// CHECK-LABEL: @neg_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
//
svuint32_t neg_u32(svuint32_t a) {
return -a;
}

// CHECK-LABEL: @neg_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
//
svuint64_t neg_u64(svuint64_t a) {
return -a;
}