; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -verify-machineinstrs -mattr=+sve < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Muls where one operand is a splat of -1 should fold to a predicated neg.
define <vscale x 16 x i8> @mul_neg_fold_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_neg_fold_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.b, p0/m, z0.b
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1))
  ret <vscale x 16 x i8> %1
}

define <vscale x 8 x i16> @mul_neg_fold_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_neg_fold_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1))
  ret <vscale x 8 x i16> %1
}

define <vscale x 4 x i32> @mul_neg_fold_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_neg_fold_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1))
  ret <vscale x 4 x i32> %1
}

define <vscale x 2 x i64> @mul_neg_fold_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_neg_fold_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1))
  ret <vscale x 2 x i64> %1
}

define <vscale x 16 x i8> @mul_neg_fold_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_neg_fold_u_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.b, p0/m, z0.b
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1))
  ret <vscale x 16 x i8> %1
}

define <vscale x 8 x i16> @mul_neg_fold_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_neg_fold_u_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1))
  ret <vscale x 8 x i16> %1
}

define <vscale x 4 x i32> @mul_neg_fold_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_neg_fold_u_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1))
  ret <vscale x 4 x i32> %1
}

define <vscale x 2 x i64> @mul_neg_fold_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_neg_fold_u_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1))
  ret <vscale x 2 x i64> %1
}

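; For the merging (non-_u) intrinsics, swapping the operands still folds to a
; predicated neg, but the inactive lanes must keep the value of the first
; operand (the -1 splat), so the splat is materialised and the negated %a is
; merged into it before being moved to the return register.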
define <vscale x 16 x i8> @mul_neg_fold_different_argument_order_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.b, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    neg z1.b, p0/m, z0.b
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 -1), <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %1
}

define <vscale x 8 x i16> @mul_neg_fold_different_argument_order_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.h, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    neg z1.h, p0/m, z0.h
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -1), <vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %1
}

define <vscale x 4 x i32> @mul_neg_fold_different_argument_order_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    neg z1.s, p0/m, z0.s
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 -1), <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %1
}

define <vscale x 2 x i64> @mul_neg_fold_different_argument_order_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_neg_fold_different_argument_order_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.d, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    neg z1.d, p0/m, z0.d
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 -1), <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %1
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)