; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s

; fold (and (or x, C), D) -> D if (C & D) == D
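; Here C = 255 and D = 8; since 255 & 8 == 8, the or/and pair below folds to
; the constant splat(8), materialized by a single vmv.v.i.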

define <vscale x 4 x i32> @and_or_nxv4i32(<vscale x 4 x i32> %A) {
; CHECK-LABEL: and_or_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 8
; CHECK-NEXT:    ret
  %v1 = or <vscale x 4 x i32> %A, splat (i32 255)
  %v2 = and <vscale x 4 x i32> %v1, splat (i32 8)
  ret <vscale x 4 x i32> %v2
}

; (or (and X, c1), c2) -> (and (or X, c2), c1|c2) iff (c1 & c2) != 0
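; Here c1 = 7 and c2 = 3; c1 & c2 = 3 != 0, so the combine reassociates to
; (and (or X, 3), 7), which maps to the vor.vi/vand.vi pair below.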

define <vscale x 2 x i64> @or_and_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vor.vi v8, v8, 3
; CHECK-NEXT:    vand.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 7)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}

; If the OR sets every bit kept by the AND mask, the whole expression folds to a constant.
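; Here the AND keeps only bit 0 and the OR sets bits 0 and 1, so the result is
; splat(3) regardless of %a0.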

define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64_fold:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 1)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
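; The two shifts below (by 2 and by 4) combine into a single shift by 6.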

define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_shl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = shl <vscale x 4 x i32> %x, splat (i32 2)
  %v2 = shl <vscale x 4 x i32> %v1, splat (i32 4)
  ret <vscale x 4 x i32> %v2
}

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
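; As above: the shift amounts 2 and 4 are added, giving a single vsra.vi by 6.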

define <vscale x 2 x i32> @combine_vec_ashr_ashr(<vscale x 2 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_ashr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = ashr <vscale x 2 x i32> %x, splat (i32 2)
  %v2 = ashr <vscale x 2 x i32> %v1, splat (i32 4)
  ret <vscale x 2 x i32> %v2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
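; Here 4 + 4 = 8, which is still less than the e16 element width, so the
; combined shift amount is in range and a single vsrl.vi by 8 suffices.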

define <vscale x 8 x i16> @combine_vec_lshr_lshr(<vscale x 8 x i16> %x) {
; CHECK-LABEL: combine_vec_lshr_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %v1 = lshr <vscale x 8 x i16> %x, splat (i16 4)
  %v2 = lshr <vscale x 8 x i16> %v1, splat (i16 4)
  ret <vscale x 8 x i16> %v2
}

; fold (fmul x, 1.0) -> x
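; Multiplying by 1.0 is an identity, so no vector instructions are emitted and
; the function lowers to a bare ret.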
define <vscale x 2 x float> @combine_fmul_one(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> %x, splat (float 1.0)
  ret <vscale x 2 x float> %v
}

; fold (fmul 1.0, x) -> x
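; fmul is commutative, so the same identity fold applies with the constant on
; the left-hand side.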
define <vscale x 2 x float> @combine_fmul_one_commuted(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one_commuted:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> splat (float 1.0), %x
  ret <vscale x 2 x float> %v
}