| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
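;
; This file tests the vsoxseg<nf> (vector indexed-ordered segment store)
; intrinsics for nf = 2, 3 and 4 over several value types and index EEWs.
; Each unmasked test is paired with a masked variant that takes an extra
; <vscale x N x i1> mask operand and expects the same code with a trailing
; v0.t. The vmv<k>r.v copies in the checks gather the (identical) segment
; values into a consecutive register tuple before the store.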
| |
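; vsoxseg2 of <vscale x 16 x i16> values (LMUL=4), indexed by i16, i8 and i32 elements.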
| declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv4r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv4r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
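; vsoxseg2 of <vscale x 4 x i32> values (LMUL=2), indexed by i32, i8, i64 and i16 elements.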
| declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
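; vsoxseg3 of <vscale x 4 x i32> values.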
| declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
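; vsoxseg4 of <vscale x 4 x i32> values.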
| declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vmv2r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vmv2r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
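; vsoxseg2 of <vscale x 16 x i8> values (LMUL=2), indexed by i16, i8 and i32 elements.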
| declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
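; vsoxseg3 of <vscale x 16 x i8> values.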
| declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
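; vsoxseg4 of <vscale x 16 x i8> values.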
| declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vmv2r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vmv2r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
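; vsoxseg2 of <vscale x 1 x i64> values (LMUL=1), indexed by i64, i32, i16 and i8 elements.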
| declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
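; vsoxseg3 of <vscale x 1 x i64> values.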
| declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
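; vsoxseg4 of <vscale x 1 x i64> values.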
| declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
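| ; vsoxseg5: five-field segment store of nxv1i32 values; the replicated value |
| ; tuple grows to v10-v14 while the index vector stays in v9. |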
| declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
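| ; vsoxseg6: six-field variant; the replicated value tuple is v10-v15. |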
| declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
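| ; vsoxseg7: seven-field variant; the replicated value tuple is v10-v16. |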
| declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
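| ; vsoxseg8: eight-field variant, the largest field count the V extension |
| ; allows; the replicated value tuple occupies v10-v17. |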
| declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
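| ; Two-field stores of nxv8i16 values (LMUL=2). When the index register group |
| ; overlaps the v8m2_v10m2 tuple it is moved to v12 first; ei32/ei64 indices |
| ; already arrive in v12/v16, so only the v10 half of the tuple needs a copy. |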
| declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
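| ; vsoxseg3 of nxv8i16: for ei16/ei8 the tuple is rebuilt at v12/v14/v16, for |
| ; ei32 at v16/v18/v20; for ei64 it stays at v8-v12 since the index sits in v16. |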
| declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
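| ; vsoxseg4 of nxv8i16: the same pattern with a fourth field (v12-v18, |
| ; v16-v22, or v8-v14 in the ei64 case). |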
| declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v12 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv2r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2 |
| ; CHECK-NEXT: vmv2r.v v10, v8 |
| ; CHECK-NEXT: vmv2r.v v12, v8 |
| ; CHECK-NEXT: vmv2r.v v14, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vmv2r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv2r.v v16, v8 |
| ; CHECK-NEXT: vmv2r.v v18, v16 |
| ; CHECK-NEXT: vmv2r.v v20, v16 |
| ; CHECK-NEXT: vmv2r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
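| ; From here the stored type is <vscale x 4 x i8>, i.e. SEW=8 at LMUL=1/2 |
| ; (vsetvli ... e8, mf2), again sweeping the index EEW per vsoxseg2 pair. |
| ; Where the index arrives in v9 (the ei8 and ei16 cases), it is first |
| ; moved to v10 so the two-field value tuple can occupy v8_v9. |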
| declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
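| ; vsoxseg3: three identical fields, hence three vmv1r.v copies per test. |
| ; The ei64 variants keep the tuple at v8_v9_v10 and take the m4 index |
| ; group from v12. |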
| declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
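| ; vsoxseg4 on <vscale x 4 x i8>: the same pattern with a four-field tuple. |
| ; In the ei64 case the tuple stays rooted at v8 (a kill annotation plus |
| ; three copies); otherwise four copies place it clear of the index. |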
| declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
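| ; vsoxseg5 and up: with an m4 index in v12-v15, a five-or-more-field |
| ; tuple rooted at v8 would overlap it, so the ei64 variants below build |
| ; the tuple at v16 and up instead of reusing v8. |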
| declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
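| ; vsoxseg6: six copies of %val per tuple; the ei64 case fills v16-v21. |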
| declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
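| ; vsoxseg7: seven-field tuples; the ei64 case fills v16-v22. |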
| declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
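| ; vsoxseg8 is the maximum segment count (NF=8). In the ei64 case the |
| ; tuple fills v16-v23 while the index group sits in v12-v15 and the |
| ; mask, where present, in v0. |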
| declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vmv1r.v v23, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vmv1r.v v23, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
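| ; vsoxseg2 through vsoxseg8 of nxv1i16 values follow; each segment count is tested with i64, i32, i16, and i8 index elements, in unmasked and masked variants. |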
| declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
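| ; For three or more segments the value operand is copied into every register of the store tuple (the vmv1r.v chain below) before the indexed segment store. |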
| declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
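| ; The same vsoxseg2 pattern repeats for nxv2i32 values (e32, m1), indexed by i32, i8, and i16 elements. |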
| declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
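| ; With a 64-bit index the index vector is LMUL=2 (v10-v11), clear of the |
| ; v8_v9 tuple, so only the single vmv1r.v copy of the value is needed. |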
| declare void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
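| ; vsoxseg3: the value tuple now needs three consecutive registers, so llc |
| ; builds the v10-v12 group from v8 and keeps the index in v9. The masked |
| ; variants differ only by the v0.t suffix; the mask operand is allocated to |
| ; v0, as the .t forms require. |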
| declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
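| ; For the i64-indexed vsoxseg3 the m2 index occupies v10-v11, so the value |
| ; tuple is relocated to v12-v14 instead. |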
| declare void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
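| ; vsoxseg4: same replication pattern across a four-register group (v10-v13, |
| ; or v12-v15 when the m2 index claims v10-v11). |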
| declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
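| ; vsoxseg5: five-register value group; NFIELDS * EMUL = 5 at m1, still within |
| ; the RVV architectural limit of 8 for segment operations. |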
| declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
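| ; vsoxseg6: note the vsetvli is identical to the seg2 case (e32, m1, ta, mu); |
| ; the segment count is encoded in the mnemonic, not in VTYPE. |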
| declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
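| ; vsoxseg7: seven copies fill v10-v16 (v12-v18 for the i64 index), leaving |
| ; the index register untouched below the group. |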
| declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
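| ; vsoxseg8: the full eight-register group v10-v17 (v12-v19 for the i64 |
| ; index), the largest NFIELDS * EMUL combination allowed at m1. |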
| declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
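| ; nxv8i8 tests: e8 data at m1; the index register group scales with the index |
| ; EEW: ei16 uses m2 (v10-v11), ei8 stays m1 and needs the v9-to-v10 shuffle, |
| ; ei64 uses m8 (v16-v23), and ei32 uses m4 (v12-v15). |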
| declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
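; Starting with vsoxseg3, the replicated value must occupy consecutive
; registers as a segment tuple; the vmv1r.v runs below build that tuple
; (v12_v13_v14, v10_v11_v12, or v8_v9_v10 in place) from the single incoming
; copy in v8.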
| declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
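; Note how the tuple placement avoids the index register group as the segment
; count grows: with an m2 index in v10 the tuple starts at v12, with an m4
; index in v12 it starts at v16, and with an m8 index in v16-v23 the tuple is
; built in place at v8.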
| declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v8 |
| ; CHECK-NEXT: vmv1r.v v14, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v8 |
| ; CHECK-NEXT: vmv1r.v v14, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
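; seg8 is the largest NFIELDS the spec allows. The eight m1 fields occupy
; eight consecutive vector registers; in the ei64 case the in-place v8-v15
; tuple plus the v16-v23 index group consume the entire upper register file.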
| declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v8 |
| ; CHECK-NEXT: vmv1r.v v14, v8 |
| ; CHECK-NEXT: vmv1r.v v15, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v8 |
| ; CHECK-NEXT: vmv1r.v v14, v8 |
| ; CHECK-NEXT: vmv1r.v v15, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vmv1r.v v23, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vmv1r.v v23, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
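; Switching to <vscale x 4 x i64> data (e64/m4): each field is an m4 register
; group, so the NFIELDS*LMUL <= 8 constraint limits these tests to seg2. The
; index is first copied up to v16 and the value duplicated into v12 to form
; the v8m4_v12m4 tuple.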
| declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv2r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv4r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv4r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv4r.v v12, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
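| ; Two-field (vsoxseg2) stores of <vscale x 4 x i16> values with i32, i8, i64, |
| ; and i16 index types; each field is a single vector register, so the v8_v9 |
| ; tuple is assembled with vmv1r copies. |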
| declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
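| ; Three-field (vsoxseg3) stores of <vscale x 4 x i16> values, unmasked and |
| ; masked, across i32, i8, i64, and i16 index types. |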
| declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
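| ; Four-field (vsoxseg4) stores of <vscale x 4 x i16> values across the same |
| ; four index types. |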
| declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
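| ; Five-field (vsoxseg5) stores of <vscale x 4 x i16> values. |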
| declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
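| ; Six-field (vsoxseg6) stores of <vscale x 4 x i16> values. |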
| declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
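| ; Seven-field (vsoxseg7) stores of <vscale x 4 x i16> values; with an i64 |
| ; index occupying v12-v15, the seven-register tuple is built in v16-v22. |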
| declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
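| ; Eight-field (vsoxseg8) stores of <vscale x 4 x i16> values; NFIELDS=8 is |
| ; the largest segment count the V extension defines. |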
| declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v12, v8 |
| ; CHECK-NEXT: vmv1r.v v13, v12 |
| ; CHECK-NEXT: vmv1r.v v14, v12 |
| ; CHECK-NEXT: vmv1r.v v15, v12 |
| ; CHECK-NEXT: vmv1r.v v16, v12 |
| ; CHECK-NEXT: vmv1r.v v17, v12 |
| ; CHECK-NEXT: vmv1r.v v18, v12 |
| ; CHECK-NEXT: vmv1r.v v19, v12 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vmv1r.v v23, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v16, v8 |
| ; CHECK-NEXT: vmv1r.v v17, v16 |
| ; CHECK-NEXT: vmv1r.v v18, v16 |
| ; CHECK-NEXT: vmv1r.v v19, v16 |
| ; CHECK-NEXT: vmv1r.v v20, v16 |
| ; CHECK-NEXT: vmv1r.v v21, v16 |
| ; CHECK-NEXT: vmv1r.v v22, v16 |
| ; CHECK-NEXT: vmv1r.v v23, v16 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64) |
| |
| define void @test_vsoxseg8_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vmv1r.v v17, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu |
| ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
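| ; For the two-segment cases below the register group can be built in place: |
| ; the "kill" comment retags v8 as part of the v8_v9 tuple, the index is |
| ; first moved out of the way to v10, and v9 is then overwritten with the |
| ; second copy of %val. |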
| declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 |
| ; CHECK-NEXT: vmv1r.v v10, v9 |
| ; CHECK-NEXT: vmv1r.v v9, v8 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
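| ; With three or more segments an in-place group starting at v8 would overlap |
| ; the index held in v9, so %val is instead copied out to a fresh group |
| ; starting at v10 and the index stays in v9. |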
| declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg3_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg4_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg5_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64) |
| declare void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg6_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i64: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64) |
| declare void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64) |
| |
| define void @test_vsoxseg7_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) |
| ret void |
| } |
| |
| define void @test_vsoxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) { |
| ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vmv1r.v v10, v8 |
| ; CHECK-NEXT: vmv1r.v v11, v10 |
| ; CHECK-NEXT: vmv1r.v v12, v10 |
| ; CHECK-NEXT: vmv1r.v v13, v10 |
| ; CHECK-NEXT: vmv1r.v v14, v10 |
| ; CHECK-NEXT: vmv1r.v v15, v10 |
| ; CHECK-NEXT: vmv1r.v v16, v10 |
| ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu |
| ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t |
| ; CHECK-NEXT: ret |
| entry: |
| tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) |
| ret void |
| } |
| |
| declare void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64) |
|