// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +zve64d \
// RUN: -target-feature +f -target-feature +d -disable-O0-optnone \
// RUN: -mvscale-min=4 -mvscale-max=4 -emit-llvm -o - %s | \
// RUN: opt -S -passes=sroa | FileCheck %s
// REQUIRES: riscv-registered-target
#include <stdint.h>
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;
typedef vint8m1_t fixed_int8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint16m1_t fixed_int16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint8m1_t fixed_uint8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint16m1_t fixed_uint16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint32m1_t fixed_uint32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint64m1_t fixed_uint64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat32m1_t fixed_float32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
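// With -mvscale-min=4 -mvscale-max=4 and zve64d, vscale is fixed at 4, so
// VLEN is 256 bits and __riscv_v_fixed_vlen is 256. Each LMUL=1 fixed-length
// type above therefore holds 256 bits of data (e.g. 32 x i8 or 4 x i64). The
// CHECK lines below reflect this: each fixed-length value is passed and
// returned in its scalable container type (e.g. <vscale x 8 x i8>) and
// converted via llvm.vector.extract/llvm.vector.insert at index 0.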
// AND
// CHECK-LABEL: @and_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t and_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t and_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t and_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t and_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t and_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t and_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t and_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
return a & b;
}
// CHECK-LABEL: @and_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t and_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
return a & b;
}
// OR
// CHECK-LABEL: @or_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t or_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t or_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t or_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t or_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t or_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t or_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t or_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
return a | b;
}
// CHECK-LABEL: @or_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[OR:%.*]] = or <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[OR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t or_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
return a | b;
}
// XOR
// CHECK-LABEL: @xor_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t xor_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t xor_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t xor_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t xor_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t xor_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t xor_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t xor_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
return a ^ b;
}
// CHECK-LABEL: @xor_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t xor_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
return a ^ b;
}
// NOT
// CHECK-LABEL: @not_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <32 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t not_i8(fixed_int8m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <16 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t not_i16(fixed_int16m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <8 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t not_i32(fixed_int32m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <4 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t not_i64(fixed_int64m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <32 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t not_u8(fixed_uint8m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <16 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t not_u16(fixed_uint16m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <8 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t not_u32(fixed_uint32m1_t a) {
return ~a;
}
// CHECK-LABEL: @not_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NOT:%.*]] = xor <4 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1>
// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[NOT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t not_u64(fixed_uint64m1_t a) {
return ~a;
}