| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1 |
| ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4 |
| ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1 |
| ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4 |
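| |
| ; This file tests fixed-length vector sign- and zero-extending loads and |
| ; truncating stores for element types i1/i8/i16/i32/i64, with the register |
| ; grouping capped at LMUL=1 (LMULMAX1) and at LMUL=4 (LMULMAX4). |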
| |
| define <2 x i16> @sextload_v2i1_v2i16(<2 x i1>* %x) { |
| ; CHECK-LABEL: sextload_v2i1_v2i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vlm.v v0, (a0) |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i1>, <2 x i1>* %x |
| %z = sext <2 x i1> %y to <2 x i16> |
| ret <2 x i16> %z |
| } |
| |
| define <2 x i16> @sextload_v2i8_v2i16(<2 x i8>* %x) { |
| ; CHECK-LABEL: sextload_v2i8_v2i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i8>, <2 x i8>* %x |
| %z = sext <2 x i8> %y to <2 x i16> |
| ret <2 x i16> %z |
| } |
| |
| define <2 x i16> @zextload_v2i8_v2i16(<2 x i8>* %x) { |
| ; CHECK-LABEL: zextload_v2i8_v2i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i8>, <2 x i8>* %x |
| %z = zext <2 x i8> %y to <2 x i16> |
| ret <2 x i16> %z |
| } |
| |
| define <2 x i32> @sextload_v2i8_v2i32(<2 x i8>* %x) { |
| ; CHECK-LABEL: sextload_v2i8_v2i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf4 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i8>, <2 x i8>* %x |
| %z = sext <2 x i8> %y to <2 x i32> |
| ret <2 x i32> %z |
| } |
| |
| define <2 x i32> @zextload_v2i8_v2i32(<2 x i8>* %x) { |
| ; CHECK-LABEL: zextload_v2i8_v2i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf4 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i8>, <2 x i8>* %x |
| %z = zext <2 x i8> %y to <2 x i32> |
| ret <2 x i32> %z |
| } |
| |
| define <2 x i64> @sextload_v2i8_v2i64(<2 x i8>* %x) { |
| ; CHECK-LABEL: sextload_v2i8_v2i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf8 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i8>, <2 x i8>* %x |
| %z = sext <2 x i8> %y to <2 x i64> |
| ret <2 x i64> %z |
| } |
| |
| define <2 x i64> @zextload_v2i8_v2i64(<2 x i8>* %x) { |
| ; CHECK-LABEL: zextload_v2i8_v2i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf8 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i8>, <2 x i8>* %x |
| %z = zext <2 x i8> %y to <2 x i64> |
| ret <2 x i64> %z |
| } |
| |
| define <4 x i16> @sextload_v4i8_v4i16(<4 x i8>* %x) { |
| ; CHECK-LABEL: sextload_v4i8_v4i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <4 x i8>, <4 x i8>* %x |
| %z = sext <4 x i8> %y to <4 x i16> |
| ret <4 x i16> %z |
| } |
| |
| define <4 x i16> @zextload_v4i8_v4i16(<4 x i8>* %x) { |
| ; CHECK-LABEL: zextload_v4i8_v4i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <4 x i8>, <4 x i8>* %x |
| %z = zext <4 x i8> %y to <4 x i16> |
| ret <4 x i16> %z |
| } |
| |
| define <4 x i32> @sextload_v4i8_v4i32(<4 x i8>* %x) { |
| ; CHECK-LABEL: sextload_v4i8_v4i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf4 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <4 x i8>, <4 x i8>* %x |
| %z = sext <4 x i8> %y to <4 x i32> |
| ret <4 x i32> %z |
| } |
| |
| define <4 x i32> @zextload_v4i8_v4i32(<4 x i8>* %x) { |
| ; CHECK-LABEL: zextload_v4i8_v4i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf4 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <4 x i8>, <4 x i8>* %x |
| %z = zext <4 x i8> %y to <4 x i32> |
| ret <4 x i32> %z |
| } |
| |
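| ; From here on, the extended result is wider than one vector register when |
| ; LMUL is capped at 1, so the LMULMAX1 lowering splits the source with |
| ; vslidedown.vi and extends each piece into its own m1 destination register. |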
| define <4 x i64> @sextload_v4i8_v4i64(<4 x i8>* %x) { |
| ; LMULMAX1-LABEL: sextload_v4i8_v4i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v9, v8 |
| ; LMULMAX1-NEXT: vsext.vf8 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v4i8_v4i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v10, (a0) |
| ; LMULMAX4-NEXT: vsext.vf8 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <4 x i8>, <4 x i8>* %x |
| %z = sext <4 x i8> %y to <4 x i64> |
| ret <4 x i64> %z |
| } |
| |
| define <4 x i64> @zextload_v4i8_v4i64(<4 x i8>* %x) { |
| ; LMULMAX1-LABEL: zextload_v4i8_v4i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v9, v8 |
| ; LMULMAX1-NEXT: vzext.vf8 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v4i8_v4i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v10, (a0) |
| ; LMULMAX4-NEXT: vzext.vf8 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <4 x i8>, <4 x i8>* %x |
| %z = zext <4 x i8> %y to <4 x i64> |
| ret <4 x i64> %z |
| } |
| |
| define <8 x i16> @sextload_v8i8_v8i16(<8 x i8>* %x) { |
| ; CHECK-LABEL: sextload_v8i8_v8i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <8 x i8>, <8 x i8>* %x |
| %z = sext <8 x i8> %y to <8 x i16> |
| ret <8 x i16> %z |
| } |
| |
| define <8 x i16> @zextload_v8i8_v8i16(<8 x i8>* %x) { |
| ; CHECK-LABEL: zextload_v8i8_v8i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; CHECK-NEXT: vle8.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <8 x i8>, <8 x i8>* %x |
| %z = zext <8 x i8> %y to <8 x i16> |
| ret <8 x i16> %z |
| } |
| |
| define <8 x i32> @sextload_v8i8_v8i32(<8 x i8>* %x) { |
| ; LMULMAX1-LABEL: sextload_v8i8_v8i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v9, v8 |
| ; LMULMAX1-NEXT: vsext.vf4 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v8i8_v8i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v10, (a0) |
| ; LMULMAX4-NEXT: vsext.vf4 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i8>, <8 x i8>* %x |
| %z = sext <8 x i8> %y to <8 x i32> |
| ret <8 x i32> %z |
| } |
| |
| define <8 x i32> @zextload_v8i8_v8i32(<8 x i8>* %x) { |
| ; LMULMAX1-LABEL: zextload_v8i8_v8i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v9, v8 |
| ; LMULMAX1-NEXT: vzext.vf4 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v8i8_v8i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v10, (a0) |
| ; LMULMAX4-NEXT: vzext.vf4 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i8>, <8 x i8>* %x |
| %z = zext <8 x i8> %y to <8 x i32> |
| ret <8 x i32> %z |
| } |
| |
| define <8 x i64> @sextload_v8i8_v8i64(<8 x i8>* %x) { |
| ; LMULMAX1-LABEL: sextload_v8i8_v8i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v11, v8 |
| ; LMULMAX1-NEXT: vsext.vf8 v8, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v8i8_v8i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v12, (a0) |
| ; LMULMAX4-NEXT: vsext.vf8 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i8>, <8 x i8>* %x |
| %z = sext <8 x i8> %y to <8 x i64> |
| ret <8 x i64> %z |
| } |
| |
| define <8 x i64> @zextload_v8i8_v8i64(<8 x i8>* %x) { |
| ; LMULMAX1-LABEL: zextload_v8i8_v8i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v11, v8 |
| ; LMULMAX1-NEXT: vzext.vf8 v8, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v8i8_v8i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v12, (a0) |
| ; LMULMAX4-NEXT: vzext.vf8 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i8>, <8 x i8>* %x |
| %z = zext <8 x i8> %y to <8 x i64> |
| ret <8 x i64> %z |
| } |
| |
| define <16 x i16> @sextload_v16i8_v16i16(<16 x i8>* %x) { |
| ; LMULMAX1-LABEL: sextload_v16i8_v16i16: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v16i8_v16i16: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v10, (a0) |
| ; LMULMAX4-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i8>, <16 x i8>* %x |
| %z = sext <16 x i8> %y to <16 x i16> |
| ret <16 x i16> %z |
| } |
| |
| define <16 x i16> @zextload_v16i8_v16i16(<16 x i8>* %x) { |
| ; LMULMAX1-LABEL: zextload_v16i8_v16i16: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v16i8_v16i16: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v10, (a0) |
| ; LMULMAX4-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i8>, <16 x i8>* %x |
| %z = zext <16 x i8> %y to <16 x i16> |
| ret <16 x i16> %z |
| } |
| |
| define <16 x i32> @sextload_v16i8_v16i32(<16 x i8>* %x) { |
| ; LMULMAX1-LABEL: sextload_v16i8_v16i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v11, v8 |
| ; LMULMAX1-NEXT: vsext.vf4 v8, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v16i8_v16i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v12, (a0) |
| ; LMULMAX4-NEXT: vsext.vf4 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i8>, <16 x i8>* %x |
| %z = sext <16 x i8> %y to <16 x i32> |
| ret <16 x i32> %z |
| } |
| |
| define <16 x i32> @zextload_v16i8_v16i32(<16 x i8>* %x) { |
| ; LMULMAX1-LABEL: zextload_v16i8_v16i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v11, v8 |
| ; LMULMAX1-NEXT: vzext.vf4 v8, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v16i8_v16i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v12, (a0) |
| ; LMULMAX4-NEXT: vzext.vf4 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i8>, <16 x i8>* %x |
| %z = zext <16 x i8> %y to <16 x i32> |
| ret <16 x i32> %z |
| } |
| |
| define <16 x i64> @sextload_v16i8_v16i64(<16 x i8>* %x) { |
| ; LMULMAX1-LABEL: sextload_v16i8_v16i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v16, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v12, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v9, v10 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v16, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v10, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v14, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v13, v14 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v14, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v15, v11, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v11, v15 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf8 v15, v8 |
| ; LMULMAX1-NEXT: vsext.vf8 v8, v16 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v16i8_v16i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v16, (a0) |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vsext.vf8 v12, v8 |
| ; LMULMAX4-NEXT: vsext.vf8 v8, v16 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i8>, <16 x i8>* %x |
| %z = sext <16 x i8> %y to <16 x i64> |
| ret <16 x i64> %z |
| } |
| |
| define <16 x i64> @zextload_v16i8_v16i64(<16 x i8>* %x) { |
| ; LMULMAX1-LABEL: zextload_v16i8_v16i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vle8.v v16, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v12, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v9, v10 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v16, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v10, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v14, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v13, v14 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v14, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v15, v11, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v11, v15 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf8 v15, v8 |
| ; LMULMAX1-NEXT: vzext.vf8 v8, v16 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v16i8_v16i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX4-NEXT: vle8.v v16, (a0) |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, mu |
| ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vzext.vf8 v12, v8 |
| ; LMULMAX4-NEXT: vzext.vf8 v8, v16 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i8>, <16 x i8>* %x |
| %z = zext <16 x i8> %y to <16 x i64> |
| ret <16 x i64> %z |
| } |
| |
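| ; Truncation to i1 produces a mask register rather than a narrowed vector: |
| ; the low bit is isolated with vand.vi and turned into a mask with vmsne.vi, |
| ; then merged into a zeroed vector so the full mask register written by |
| ; vsm.v has defined contents. |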
| define void @truncstore_v2i8_v2i1(<2 x i8> %x, <2 x i1>* %z) { |
| ; CHECK-LABEL: truncstore_v2i8_v2i1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu |
| ; CHECK-NEXT: vand.vi v8, v8, 1 |
| ; CHECK-NEXT: vmsne.vi v0, v8, 0 |
| ; CHECK-NEXT: vmv.v.i v8, 0 |
| ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 |
| ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; CHECK-NEXT: vmv.v.i v9, 0 |
| ; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu |
| ; CHECK-NEXT: vslideup.vi v9, v8, 0 |
| ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; CHECK-NEXT: vmsne.vi v8, v9, 0 |
| ; CHECK-NEXT: vsm.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i8> %x to <2 x i1> |
| store <2 x i1> %y, <2 x i1>* %z |
| ret void |
| } |
| |
| define void @truncstore_v2i16_v2i8(<2 x i16> %x, <2 x i8>* %z) { |
| ; CHECK-LABEL: truncstore_v2i16_v2i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i16> %x to <2 x i8> |
| store <2 x i8> %y, <2 x i8>* %z |
| ret void |
| } |
| |
| define <2 x i32> @sextload_v2i16_v2i32(<2 x i16>* %x) { |
| ; CHECK-LABEL: sextload_v2i16_v2i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i16>, <2 x i16>* %x |
| %z = sext <2 x i16> %y to <2 x i32> |
| ret <2 x i32> %z |
| } |
| |
| define <2 x i32> @zextload_v2i16_v2i32(<2 x i16>* %x) { |
| ; CHECK-LABEL: zextload_v2i16_v2i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i16>, <2 x i16>* %x |
| %z = zext <2 x i16> %y to <2 x i32> |
| ret <2 x i32> %z |
| } |
| |
| define <2 x i64> @sextload_v2i16_v2i64(<2 x i16>* %x) { |
| ; CHECK-LABEL: sextload_v2i16_v2i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle16.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf4 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i16>, <2 x i16>* %x |
| %z = sext <2 x i16> %y to <2 x i64> |
| ret <2 x i64> %z |
| } |
| |
| define <2 x i64> @zextload_v2i16_v2i64(<2 x i16>* %x) { |
| ; CHECK-LABEL: zextload_v2i16_v2i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle16.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf4 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i16>, <2 x i16>* %x |
| %z = zext <2 x i16> %y to <2 x i64> |
| ret <2 x i64> %z |
| } |
| |
| define void @truncstore_v4i16_v4i8(<4 x i16> %x, <4 x i8>* %z) { |
| ; CHECK-LABEL: truncstore_v4i16_v4i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <4 x i16> %x to <4 x i8> |
| store <4 x i8> %y, <4 x i8>* %z |
| ret void |
| } |
| |
| define <4 x i32> @sextload_v4i16_v4i32(<4 x i16>* %x) { |
| ; CHECK-LABEL: sextload_v4i16_v4i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle16.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <4 x i16>, <4 x i16>* %x |
| %z = sext <4 x i16> %y to <4 x i32> |
| ret <4 x i32> %z |
| } |
| |
| define <4 x i32> @zextload_v4i16_v4i32(<4 x i16>* %x) { |
| ; CHECK-LABEL: zextload_v4i16_v4i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle16.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <4 x i16>, <4 x i16>* %x |
| %z = zext <4 x i16> %y to <4 x i32> |
| ret <4 x i32> %z |
| } |
| |
| define <4 x i64> @sextload_v4i16_v4i64(<4 x i16>* %x) { |
| ; LMULMAX1-LABEL: sextload_v4i16_v4i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v9, v8 |
| ; LMULMAX1-NEXT: vsext.vf4 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v4i16_v4i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v10, (a0) |
| ; LMULMAX4-NEXT: vsext.vf4 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <4 x i16>, <4 x i16>* %x |
| %z = sext <4 x i16> %y to <4 x i64> |
| ret <4 x i64> %z |
| } |
| |
| define <4 x i64> @zextload_v4i16_v4i64(<4 x i16>* %x) { |
| ; LMULMAX1-LABEL: zextload_v4i16_v4i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v9, v8 |
| ; LMULMAX1-NEXT: vzext.vf4 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v4i16_v4i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v10, (a0) |
| ; LMULMAX4-NEXT: vzext.vf4 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <4 x i16>, <4 x i16>* %x |
| %z = zext <4 x i16> %y to <4 x i64> |
| ret <4 x i64> %z |
| } |
| |
| define void @truncstore_v8i16_v8i8(<8 x i16> %x, <8 x i8>* %z) { |
| ; CHECK-LABEL: truncstore_v8i16_v8i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <8 x i16> %x to <8 x i8> |
| store <8 x i8> %y, <8 x i8>* %z |
| ret void |
| } |
| |
| define <8 x i32> @sextload_v8i16_v8i32(<8 x i16>* %x) { |
| ; LMULMAX1-LABEL: sextload_v8i16_v8i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v8i16_v8i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v10, (a0) |
| ; LMULMAX4-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i16>, <8 x i16>* %x |
| %z = sext <8 x i16> %y to <8 x i32> |
| ret <8 x i32> %z |
| } |
| |
| define <8 x i32> @zextload_v8i16_v8i32(<8 x i16>* %x) { |
| ; LMULMAX1-LABEL: zextload_v8i16_v8i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v8i16_v8i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v10, (a0) |
| ; LMULMAX4-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i16>, <8 x i16>* %x |
| %z = zext <8 x i16> %y to <8 x i32> |
| ret <8 x i32> %z |
| } |
| |
| define <8 x i64> @sextload_v8i16_v8i64(<8 x i16>* %x) { |
| ; LMULMAX1-LABEL: sextload_v8i16_v8i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v11, v8 |
| ; LMULMAX1-NEXT: vsext.vf4 v8, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v8i16_v8i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v12, (a0) |
| ; LMULMAX4-NEXT: vsext.vf4 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i16>, <8 x i16>* %x |
| %z = sext <8 x i16> %y to <8 x i64> |
| ret <8 x i64> %z |
| } |
| |
| define <8 x i64> @zextload_v8i16_v8i64(<8 x i16>* %x) { |
| ; LMULMAX1-LABEL: zextload_v8i16_v8i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v11, v8 |
| ; LMULMAX1-NEXT: vzext.vf4 v8, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v8i16_v8i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v12, (a0) |
| ; LMULMAX4-NEXT: vzext.vf4 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i16>, <8 x i16>* %x |
| %z = zext <8 x i16> %y to <8 x i64> |
| ret <8 x i64> %z |
| } |
| |
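| ; When the truncation source spans multiple m1 registers under LMULMAX1, |
| ; each part is narrowed separately and the pieces are reassembled with |
| ; vslideup.vi before the contiguous store. |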
| define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) { |
| ; LMULMAX1-LABEL: truncstore_v16i16_v16i8: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 |
| ; LMULMAX1-NEXT: vse8.v v8, (a0) |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: truncstore_v16i16_v16i8: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 |
| ; LMULMAX4-NEXT: vse8.v v10, (a0) |
| ; LMULMAX4-NEXT: ret |
| %y = trunc <16 x i16> %x to <16 x i8> |
| store <16 x i8> %y, <16 x i8>* %z |
| ret void |
| } |
| |
| define <16 x i32> @sextload_v16i16_v16i32(<16 x i16>* %x) { |
| ; LMULMAX1-LABEL: sextload_v16i16_v16i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v10, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle16.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v11, v8 |
| ; LMULMAX1-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: vsext.vf2 v10, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v16i16_v16i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v12, (a0) |
| ; LMULMAX4-NEXT: vsext.vf2 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i16>, <16 x i16>* %x |
| %z = sext <16 x i16> %y to <16 x i32> |
| ret <16 x i32> %z |
| } |
| |
| define <16 x i32> @zextload_v16i16_v16i32(<16 x i16>* %x) { |
| ; LMULMAX1-LABEL: zextload_v16i16_v16i32: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v10, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle16.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v11, v8 |
| ; LMULMAX1-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: vzext.vf2 v10, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v16i16_v16i32: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v12, (a0) |
| ; LMULMAX4-NEXT: vzext.vf2 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i16>, <16 x i16>* %x |
| %z = zext <16 x i16> %y to <16 x i32> |
| ret <16 x i32> %z |
| } |
| |
| define <16 x i64> @sextload_v16i16_v16i64(<16 x i16>* %x) { |
| ; LMULMAX1-LABEL: sextload_v16i16_v16i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v12, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle16.v v16, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v15, v16, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v14, v15 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v11, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v13, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v15, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf4 v15, v8 |
| ; LMULMAX1-NEXT: vsext.vf4 v8, v12 |
| ; LMULMAX1-NEXT: vsext.vf4 v12, v16 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v16i16_v16i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v16, (a0) |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vsext.vf4 v12, v8 |
| ; LMULMAX4-NEXT: vsext.vf4 v8, v16 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i16>, <16 x i16>* %x |
| %z = sext <16 x i16> %y to <16 x i64> |
| ret <16 x i64> %z |
| } |
| |
| define <16 x i64> @zextload_v16i16_v16i64(<16 x i16>* %x) { |
| ; LMULMAX1-LABEL: zextload_v16i16_v16i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vle16.v v12, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle16.v v16, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v10, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v15, v16, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v14, v15 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v9, v11 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v11, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v13, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v15, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf4 v15, v8 |
| ; LMULMAX1-NEXT: vzext.vf4 v8, v12 |
| ; LMULMAX1-NEXT: vzext.vf4 v12, v16 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v16i16_v16i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vle16.v v16, (a0) |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vzext.vf4 v12, v8 |
| ; LMULMAX4-NEXT: vzext.vf4 v8, v16 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i16>, <16 x i16>* %x |
| %z = zext <16 x i16> %y to <16 x i64> |
| ret <16 x i64> %z |
| } |
| |
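| ; Each vncvt.x.x.w halves the element width, so truncating i32 to i8 takes |
| ; two narrowing steps and i64 to i8 (further below) takes three. |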
| define void @truncstore_v2i32_v2i8(<2 x i32> %x, <2 x i8>* %z) { |
| ; CHECK-LABEL: truncstore_v2i32_v2i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i32> %x to <2 x i8> |
| store <2 x i8> %y, <2 x i8>* %z |
| ret void |
| } |
| |
| define void @truncstore_v2i32_v2i16(<2 x i32> %x, <2 x i16>* %z) { |
| ; CHECK-LABEL: truncstore_v2i32_v2i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse16.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i32> %x to <2 x i16> |
| store <2 x i16> %y, <2 x i16>* %z |
| ret void |
| } |
| |
| define <2 x i64> @sextload_v2i32_v2i64(<2 x i32>* %x) { |
| ; CHECK-LABEL: sextload_v2i32_v2i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v9, (a0) |
| ; CHECK-NEXT: vsext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i32>, <2 x i32>* %x |
| %z = sext <2 x i32> %y to <2 x i64> |
| ret <2 x i64> %z |
| } |
| |
| define <2 x i64> @zextload_v2i32_v2i64(<2 x i32>* %x) { |
| ; CHECK-LABEL: zextload_v2i32_v2i64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v9, (a0) |
| ; CHECK-NEXT: vzext.vf2 v8, v9 |
| ; CHECK-NEXT: ret |
| %y = load <2 x i32>, <2 x i32>* %x |
| %z = zext <2 x i32> %y to <2 x i64> |
| ret <2 x i64> %z |
| } |
| |
| define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %z) { |
| ; CHECK-LABEL: truncstore_v4i32_v4i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <4 x i32> %x to <4 x i8> |
| store <4 x i8> %y, <4 x i8>* %z |
| ret void |
| } |
| |
| define void @truncstore_v4i32_v4i16(<4 x i32> %x, <4 x i16>* %z) { |
| ; CHECK-LABEL: truncstore_v4i32_v4i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse16.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <4 x i32> %x to <4 x i16> |
| store <4 x i16> %y, <4 x i16>* %z |
| ret void |
| } |
| |
| define <4 x i64> @sextload_v4i32_v4i64(<4 x i32>* %x) { |
| ; LMULMAX1-LABEL: sextload_v4i32_v4i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vle32.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v4i32_v4i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; LMULMAX4-NEXT: vle32.v v10, (a0) |
| ; LMULMAX4-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <4 x i32>, <4 x i32>* %x |
| %z = sext <4 x i32> %y to <4 x i64> |
| ret <4 x i64> %z |
| } |
| |
| define <4 x i64> @zextload_v4i32_v4i64(<4 x i32>* %x) { |
| ; LMULMAX1-LABEL: zextload_v4i32_v4i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vle32.v v10, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v4i32_v4i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; LMULMAX4-NEXT: vle32.v v10, (a0) |
| ; LMULMAX4-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX4-NEXT: ret |
| %y = load <4 x i32>, <4 x i32>* %x |
| %z = zext <4 x i32> %y to <4 x i64> |
| ret <4 x i64> %z |
| } |
| |
| define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) { |
| ; LMULMAX1-LABEL: truncstore_v8i32_v8i8: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 |
| ; LMULMAX1-NEXT: vse8.v v8, (a0) |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: truncstore_v8i32_v8i8: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 |
| ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v8, v10 |
| ; LMULMAX4-NEXT: vse8.v v8, (a0) |
| ; LMULMAX4-NEXT: ret |
| %y = trunc <8 x i32> %x to <8 x i8> |
| store <8 x i8> %y, <8 x i8>* %z |
| ret void |
| } |
| |
| define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) { |
| ; LMULMAX1-LABEL: truncstore_v8i32_v8i16: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 |
| ; LMULMAX1-NEXT: vse16.v v8, (a0) |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: truncstore_v8i32_v8i16: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v10, v8 |
| ; LMULMAX4-NEXT: vse16.v v10, (a0) |
| ; LMULMAX4-NEXT: ret |
| %y = trunc <8 x i32> %x to <8 x i16> |
| store <8 x i16> %y, <8 x i16>* %z |
| ret void |
| } |
| |
| define <8 x i64> @sextload_v8i32_v8i64(<8 x i32>* %x) { |
| ; LMULMAX1-LABEL: sextload_v8i32_v8i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vle32.v v10, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle32.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v11, v8 |
| ; LMULMAX1-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: vsext.vf2 v10, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v8i32_v8i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vle32.v v12, (a0) |
| ; LMULMAX4-NEXT: vsext.vf2 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i32>, <8 x i32>* %x |
| %z = sext <8 x i32> %y to <8 x i64> |
| ret <8 x i64> %z |
| } |
| |
| define <8 x i64> @zextload_v8i32_v8i64(<8 x i32>* %x) { |
| ; LMULMAX1-LABEL: zextload_v8i32_v8i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vle32.v v10, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle32.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v11, v8 |
| ; LMULMAX1-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: vzext.vf2 v10, v12 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v8i32_v8i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vle32.v v12, (a0) |
| ; LMULMAX4-NEXT: vzext.vf2 v8, v12 |
| ; LMULMAX4-NEXT: ret |
| %y = load <8 x i32>, <8 x i32>* %x |
| %z = zext <8 x i32> %y to <8 x i64> |
| ret <8 x i64> %z |
| } |
| |
| define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) { |
| ; LMULMAX1-LABEL: truncstore_v16i32_v16i8: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v10 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 |
| ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 |
| ; LMULMAX1-NEXT: vse8.v v8, (a0) |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: truncstore_v16i32_v16i8: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 |
| ; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v8, v12 |
| ; LMULMAX4-NEXT: vse8.v v8, (a0) |
| ; LMULMAX4-NEXT: ret |
| %y = trunc <16 x i32> %x to <16 x i8> |
| store <16 x i8> %y, <16 x i8>* %z |
| ret void |
| } |
| |
| define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) { |
| ; LMULMAX1-LABEL: truncstore_v16i32_v16i16: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v9 |
| ; LMULMAX1-NEXT: vncvt.x.x.w v8, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; LMULMAX1-NEXT: vncvt.x.x.w v9, v11 |
| ; LMULMAX1-NEXT: vncvt.x.x.w v10, v10 |
| ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu |
| ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 |
| ; LMULMAX1-NEXT: addi a1, a0, 16 |
| ; LMULMAX1-NEXT: vse16.v v10, (a1) |
| ; LMULMAX1-NEXT: vse16.v v8, (a0) |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: truncstore_v16i32_v16i16: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; LMULMAX4-NEXT: vncvt.x.x.w v12, v8 |
| ; LMULMAX4-NEXT: vse16.v v12, (a0) |
| ; LMULMAX4-NEXT: ret |
| %y = trunc <16 x i32> %x to <16 x i16> |
| store <16 x i16> %y, <16 x i16>* %z |
| ret void |
| } |
| |
| define <16 x i64> @sextload_v16i32_v16i64(<16 x i32>* %x) { |
| ; LMULMAX1-LABEL: sextload_v16i32_v16i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: addi a1, a0, 48 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vle32.v v16, (a1) |
| ; LMULMAX1-NEXT: addi a1, a0, 32 |
| ; LMULMAX1-NEXT: vle32.v v14, (a1) |
| ; LMULMAX1-NEXT: vle32.v v10, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle32.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v11, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v14, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v13, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vsext.vf2 v15, v8 |
| ; LMULMAX1-NEXT: vsext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: vsext.vf2 v10, v12 |
| ; LMULMAX1-NEXT: vsext.vf2 v12, v14 |
| ; LMULMAX1-NEXT: vsext.vf2 v14, v16 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: sextload_v16i32_v16i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vle32.v v16, (a0) |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vsext.vf2 v12, v8 |
| ; LMULMAX4-NEXT: vsext.vf2 v8, v16 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i32>, <16 x i32>* %x |
| %z = sext <16 x i32> %y to <16 x i64> |
| ret <16 x i64> %z |
| } |
| |
| define <16 x i64> @zextload_v16i32_v16i64(<16 x i32>* %x) { |
| ; LMULMAX1-LABEL: zextload_v16i32_v16i64: |
| ; LMULMAX1: # %bb.0: |
| ; LMULMAX1-NEXT: addi a1, a0, 48 |
| ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vle32.v v16, (a1) |
| ; LMULMAX1-NEXT: addi a1, a0, 32 |
| ; LMULMAX1-NEXT: vle32.v v14, (a1) |
| ; LMULMAX1-NEXT: vle32.v v10, (a0) |
| ; LMULMAX1-NEXT: addi a0, a0, 16 |
| ; LMULMAX1-NEXT: vle32.v v12, (a0) |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v9, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v11, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v14, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v13, v8 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu |
| ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 |
| ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; LMULMAX1-NEXT: vzext.vf2 v15, v8 |
| ; LMULMAX1-NEXT: vzext.vf2 v8, v10 |
| ; LMULMAX1-NEXT: vzext.vf2 v10, v12 |
| ; LMULMAX1-NEXT: vzext.vf2 v12, v14 |
| ; LMULMAX1-NEXT: vzext.vf2 v14, v16 |
| ; LMULMAX1-NEXT: ret |
| ; |
| ; LMULMAX4-LABEL: zextload_v16i32_v16i64: |
| ; LMULMAX4: # %bb.0: |
| ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vle32.v v16, (a0) |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, mu |
| ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 |
| ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; LMULMAX4-NEXT: vzext.vf2 v12, v8 |
| ; LMULMAX4-NEXT: vzext.vf2 v8, v16 |
| ; LMULMAX4-NEXT: ret |
| %y = load <16 x i32>, <16 x i32>* %x |
| %z = zext <16 x i32> %y to <16 x i64> |
| ret <16 x i64> %z |
| } |
| |
| define void @truncstore_v2i64_v2i8(<2 x i64> %x, <2 x i8>* %z) { |
| ; CHECK-LABEL: truncstore_v2i64_v2i8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse8.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i64> %x to <2 x i8> |
| store <2 x i8> %y, <2 x i8>* %z |
| ret void |
| } |
| |
| define void @truncstore_v2i64_v2i16(<2 x i64> %x, <2 x i16>* %z) { |
| ; CHECK-LABEL: truncstore_v2i64_v2i16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse16.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i64> %x to <2 x i16> |
| store <2 x i16> %y, <2 x i16>* %z |
| ret void |
| } |
| |
| define void @truncstore_v2i64_v2i32(<2 x i64> %x, <2 x i32>* %z) { |
| ; CHECK-LABEL: truncstore_v2i64_v2i32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vncvt.x.x.w v8, v8 |
| ; CHECK-NEXT: vse32.v v8, (a0) |
| ; CHECK-NEXT: ret |
| %y = trunc <2 x i64> %x to <2 x i32> |
| store <2 x i32> %y, <2 x i32>* %z |
| ret void |
| } |
| |
define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v4i64_v4i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v4i64_v4i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v10
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i8>
store <4 x i8> %y, <4 x i8>* %z
ret void
}

define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v4i64_v4i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v4i64_v4i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v10
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i16>
store <4 x i16> %y, <4 x i16>* %z
ret void
}

define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
; LMULMAX1-LABEL: truncstore_v4i64_v4i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v4i64_v4i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v10, v8
; LMULMAX4-NEXT: vse32.v v10, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <4 x i64> %x to <4 x i32>
store <4 x i32> %y, <4 x i32>* %z
ret void
}

define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v8i64_v8i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i64_v8i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i8>
store <8 x i8> %y, <8 x i8>* %z
ret void
}

define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v8i64_v8i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i64_v8i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v12
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i16>
store <8 x i16> %y, <8 x i16>* %z
ret void
}

define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
; LMULMAX1-LABEL: truncstore_v8i64_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v10, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i64_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i32>
store <8 x i32> %y, <8 x i32>* %z
ret void
}

define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v16i64_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 10
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v14
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 14
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i64_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v12, v16
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v12, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v14, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v14
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i8>
store <16 x i8> %y, <16 x i8>* %z
ret void
}

define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v16i64_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v10, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v14
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 6
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i64_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v12, v16
; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v14, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v14
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i16>
store <16 x i16> %y, <16 x i16>* %z
ret void
}

define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
; LMULMAX1-LABEL: truncstore_v16i64_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v11, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vncvt.x.x.w v12, v14
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v12, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 48
; LMULMAX1-NEXT: vse32.v v12, (a1)
; LMULMAX1-NEXT: addi a1, a0, 32
; LMULMAX1-NEXT: vse32.v v11, (a1)
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v10, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i64_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu
; LMULMAX4-NEXT: vslideup.vi v12, v16, 8
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i32>
store <16 x i32> %y, <16 x i32>* %z
ret void
}

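; NOTE: The remaining tests exercise scalable (vscale) vectors. fpext from
; f16 to f64 likewise has no single-step form, so it is expected to widen
; twice (vfwcvt.f.f.v to f32, then again to f64).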
define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(<vscale x 2 x half>* %x) {
; CHECK-LABEL: extload_nxv2f16_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vfwcvt.f.f.v v8, v9
; CHECK-NEXT: ret
%y = load <vscale x 2 x half>, <vscale x 2 x half>* %x
%z = fpext <vscale x 2 x half> %y to <vscale x 2 x float>
ret <vscale x 2 x float> %z
}

define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(<vscale x 2 x half>* %x) {
; CHECK-LABEL: extload_nxv2f16_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfwcvt.f.f.v v10, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10
; CHECK-NEXT: ret
%y = load <vscale x 2 x half>, <vscale x 2 x half>* %x
%z = fpext <vscale x 2 x half> %y to <vscale x 2 x double>
ret <vscale x 2 x double> %z
}

define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(<vscale x 4 x half>* %x) {
; CHECK-LABEL: extload_nxv4f16_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vl1re16.v v10, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10
; CHECK-NEXT: ret
%y = load <vscale x 4 x half>, <vscale x 4 x half>* %x
%z = fpext <vscale x 4 x half> %y to <vscale x 4 x float>
ret <vscale x 4 x float> %z
}

define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(<vscale x 4 x half>* %x) {
; CHECK-LABEL: extload_nxv4f16_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl1re16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v12, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v12
; CHECK-NEXT: ret
%y = load <vscale x 4 x half>, <vscale x 4 x half>* %x
%z = fpext <vscale x 4 x half> %y to <vscale x 4 x double>
ret <vscale x 4 x double> %z
}

define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(<vscale x 8 x half>* %x) {
; CHECK-LABEL: extload_nxv8f16_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2re16.v v12, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v12
; CHECK-NEXT: ret
%y = load <vscale x 8 x half>, <vscale x 8 x half>* %x
%z = fpext <vscale x 8 x half> %y to <vscale x 8 x float>
ret <vscale x 8 x float> %z
}

define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(<vscale x 8 x half>* %x) {
; CHECK-LABEL: extload_nxv8f16_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2re16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v16, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v16
; CHECK-NEXT: ret
%y = load <vscale x 8 x half>, <vscale x 8 x half>* %x
%z = fpext <vscale x 8 x half> %y to <vscale x 8 x double>
ret <vscale x 8 x double> %z
}

define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(<vscale x 16 x half>* %x) {
; CHECK-LABEL: extload_nxv16f16_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vl4re16.v v16, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v16
; CHECK-NEXT: ret
%y = load <vscale x 16 x half>, <vscale x 16 x half>* %x
%z = fpext <vscale x 16 x half> %y to <vscale x 16 x float>
ret <vscale x 16 x float> %z
}

define <vscale x 16 x double> @extload_nxv16f16_nxv16f64(<vscale x 16 x half>* %x) {
; CHECK-LABEL: extload_nxv16f16_nxv16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl4re16.v v16, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v20, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v20
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v24, v18
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v16, v24
; CHECK-NEXT: ret
%y = load <vscale x 16 x half>, <vscale x 16 x half>* %x
%z = fpext <vscale x 16 x half> %y to <vscale x 16 x double>
ret <vscale x 16 x double> %z
}

define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, <vscale x 2 x half>* %z) {
; CHECK-LABEL: truncstore_nxv2f32_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vse16.v v9, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 2 x float> %x to <vscale x 2 x half>
store <vscale x 2 x half> %y, <vscale x 2 x half>* %z
ret void
}

define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(<vscale x 2 x float>* %x) {
; CHECK-LABEL: extload_nxv2f32_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl1re32.v v10, (a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10
; CHECK-NEXT: ret
%y = load <vscale x 2 x float>, <vscale x 2 x float>* %x
%z = fpext <vscale x 2 x float> %y to <vscale x 2 x double>
ret <vscale x 2 x double> %z
}

define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, <vscale x 4 x half>* %z) {
; CHECK-LABEL: truncstore_nxv4f32_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v10, v8
; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 4 x float> %x to <vscale x 4 x half>
store <vscale x 4 x half> %y, <vscale x 4 x half>* %z
ret void
}

define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(<vscale x 4 x float>* %x) {
; CHECK-LABEL: extload_nxv4f32_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2re32.v v12, (a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v12
; CHECK-NEXT: ret
%y = load <vscale x 4 x float>, <vscale x 4 x float>* %x
%z = fpext <vscale x 4 x float> %y to <vscale x 4 x double>
ret <vscale x 4 x double> %z
}

define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, <vscale x 8 x half>* %z) {
; CHECK-LABEL: truncstore_nxv8f32_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v12, v8
; CHECK-NEXT: vs2r.v v12, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 8 x float> %x to <vscale x 8 x half>
store <vscale x 8 x half> %y, <vscale x 8 x half>* %z
ret void
}

define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(<vscale x 8 x float>* %x) {
; CHECK-LABEL: extload_nxv8f32_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl4re32.v v16, (a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v16
; CHECK-NEXT: ret
%y = load <vscale x 8 x float>, <vscale x 8 x float>* %x
%z = fpext <vscale x 8 x float> %y to <vscale x 8 x double>
ret <vscale x 8 x double> %z
}

define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, <vscale x 16 x half>* %z) {
; CHECK-LABEL: truncstore_nxv16f32_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v16, v8
; CHECK-NEXT: vs4r.v v16, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 16 x float> %x to <vscale x 16 x half>
store <vscale x 16 x half> %y, <vscale x 16 x half>* %z
ret void
}

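; NOTE: An nxv16f64 result occupies two m8 register groups, so the widening
; below is expected to be emitted as two halves (one vfwcvt.f.f.v per group).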
define <vscale x 16 x double> @extload_nxv16f32_nxv16f64(<vscale x 16 x float>* %x) {
; CHECK-LABEL: extload_nxv16f32_nxv16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v24
; CHECK-NEXT: vfwcvt.f.f.v v16, v28
; CHECK-NEXT: ret
%y = load <vscale x 16 x float>, <vscale x 16 x float>* %x
%z = fpext <vscale x 16 x float> %y to <vscale x 16 x double>
ret <vscale x 16 x double> %z
}

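; NOTE: For f64->f16 the first narrowing step is expected to use
; vfncvt.rod.f.f.w (round-towards-odd), so that rounding to the intermediate
; f32 cannot introduce a double-rounding error before the final round to f16.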
define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, <vscale x 2 x half>* %z) {
; CHECK-LABEL: truncstore_nxv2f64_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x half>
store <vscale x 2 x half> %y, <vscale x 2 x half>* %z
ret void
}

define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, <vscale x 2 x float>* %z) {
; CHECK-LABEL: truncstore_nxv2f64_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v10, v8
; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
store <vscale x 2 x float> %y, <vscale x 2 x float>* %z
ret void
}

define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, <vscale x 4 x half>* %z) {
; CHECK-LABEL: truncstore_nxv4f64_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v12
; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x half>
store <vscale x 4 x half> %y, <vscale x 4 x half>* %z
ret void
}

define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, <vscale x 4 x float>* %z) {
; CHECK-LABEL: truncstore_nxv4f64_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v12, v8
; CHECK-NEXT: vs2r.v v12, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x float>
store <vscale x 4 x float> %y, <vscale x 4 x float>* %z
ret void
}

define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, <vscale x 8 x half>* %z) {
; CHECK-LABEL: truncstore_nxv8f64_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v16
; CHECK-NEXT: vs2r.v v8, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x half>
store <vscale x 8 x half> %y, <vscale x 8 x half>* %z
ret void
}

define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, <vscale x 8 x float>* %z) {
; CHECK-LABEL: truncstore_nxv8f64_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v16, v8
; CHECK-NEXT: vs4r.v v16, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x float>
store <vscale x 8 x float> %y, <vscale x 8 x float>* %z
ret void
}

define void @truncstore_nxv16f64_nxv16f16(<vscale x 16 x double> %x, <vscale x 16 x half>* %z) {
; CHECK-LABEL: truncstore_nxv16f64_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rod.f.f.w v24, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v24
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v10, v12
; CHECK-NEXT: vs4r.v v8, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 16 x double> %x to <vscale x 16 x half>
store <vscale x 16 x half> %y, <vscale x 16 x half>* %z
ret void
}

define void @truncstore_nxv16f64_nxv16f32(<vscale x 16 x double> %x, <vscale x 16 x float>* %z) {
; CHECK-LABEL: truncstore_nxv16f64_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v24, v8
; CHECK-NEXT: vfncvt.f.f.w v28, v16
; CHECK-NEXT: vs8r.v v24, (a0)
; CHECK-NEXT: ret
%y = fptrunc <vscale x 16 x double> %x to <vscale x 16 x float>
store <vscale x 16 x float> %y, <vscale x 16 x float>* %z
ret void
}