| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s |
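; Check that extending loads whose result type is too wide to be legal are
; split into multiple predicated extending loads of legal subvectors, with
; the parts addressed via the reg+imm ("mul vl") form.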
| |
| ; |
| ; LD1SB/LD1B |
| ; |
| |
define <vscale x 16 x i32> @ld1b_i8_sext_i32(ptr %base) {
| ; CHECK-LABEL: ld1b_i8_sext_i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1sb { z2.s }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1sb { z3.s }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 16 x i8>, ptr %base
| %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i32> |
| ret <vscale x 16 x i32> %res |
| } |
| |
define <vscale x 16 x i32> @ld1b_i8_zext_i32(ptr %base) {
| ; CHECK-LABEL: ld1b_i8_zext_i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1b { z2.s }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1b { z3.s }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 16 x i8>, ptr %base
| %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i32> |
| ret <vscale x 16 x i32> %res |
| } |
| |
define <vscale x 16 x i64> @ld1b_i8_sext(ptr %base) {
| ; CHECK-LABEL: ld1b_i8_sext: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1sb { z2.d }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ld1sb { z4.d }, p0/z, [x0, #4, mul vl] |
| ; CHECK-NEXT: ld1sb { z5.d }, p0/z, [x0, #5, mul vl] |
| ; CHECK-NEXT: ld1sb { z6.d }, p0/z, [x0, #6, mul vl] |
| ; CHECK-NEXT: ld1sb { z7.d }, p0/z, [x0, #7, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 16 x i8>, ptr %base
| %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64> |
| ret <vscale x 16 x i64> %res |
| } |
| |
define <vscale x 16 x i64> @ld1b_i8_zext(ptr %base) {
| ; CHECK-LABEL: ld1b_i8_zext: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ld1b { z4.d }, p0/z, [x0, #4, mul vl] |
| ; CHECK-NEXT: ld1b { z5.d }, p0/z, [x0, #5, mul vl] |
| ; CHECK-NEXT: ld1b { z6.d }, p0/z, [x0, #6, mul vl] |
| ; CHECK-NEXT: ld1b { z7.d }, p0/z, [x0, #7, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 16 x i8>, ptr %base
| %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64> |
| ret <vscale x 16 x i64> %res |
| } |
| |
| ; |
| ; LD1H |
| ; |
| |
define <vscale x 8 x i64> @ld1h_i16_sext(ptr %base) {
| ; CHECK-LABEL: ld1h_i16_sext: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sh { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1sh { z2.d }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1sh { z3.d }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 8 x i16>, ptr %base
| %res = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64> |
| ret <vscale x 8 x i64> %res |
| } |
| |
define <vscale x 8 x i64> @ld1h_i16_zext(ptr %base) {
| ; CHECK-LABEL: ld1h_i16_zext: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1h { z2.d }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1h { z3.d }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 8 x i16>, ptr %base
| %res = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64> |
| ret <vscale x 8 x i64> %res |
| } |
| |
| ; |
| ; LD1W |
| ; |
| |
define <vscale x 4 x i64> @ld1w_i32_sext(ptr %base) {
| ; CHECK-LABEL: ld1w_i32_sext: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 4 x i32>, ptr %base
| %res = sext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64> |
| ret <vscale x 4 x i64> %res |
| } |
| |
define <vscale x 4 x i64> @ld1w_i32_zext(ptr %base) {
| ; CHECK-LABEL: ld1w_i32_zext: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
%wide.load = load <vscale x 4 x i32>, ptr %base
| %res = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64> |
| ret <vscale x 4 x i64> %res |
| } |
| |
;
; Extending loads from unpacked vector types (elements held one per wider
; container, e.g. <vscale x 4 x i8>) to result types too wide to be legal
;
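; These split the same way, e.g. <vscale x 8 x i8> extended to i64 elements
; becomes four <vscale x 2 x i64> parts (z0-z3).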
| |
| define <vscale x 4 x i64> @zload_4i8_4i64(ptr %a) { |
| ; CHECK-LABEL: zload_4i8_4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 4 x i8>, ptr %a |
| %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64> |
| ret <vscale x 4 x i64> %aext |
| } |
| |
| define <vscale x 4 x i64> @zload_4i16_4i64(ptr %a) { |
| ; CHECK-LABEL: zload_4i16_4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 4 x i16>, ptr %a |
| %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64> |
| ret <vscale x 4 x i64> %aext |
| } |
| |
| define <vscale x 8 x i32> @zload_8i8_8i32(ptr %a) { |
| ; CHECK-LABEL: zload_8i8_8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 8 x i8>, ptr %a |
| %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32> |
| ret <vscale x 8 x i32> %aext |
| } |
| |
| define <vscale x 8 x i64> @zload_8i8_8i64(ptr %a) { |
| ; CHECK-LABEL: zload_8i8_8i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 8 x i8>, ptr %a |
| %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64> |
| ret <vscale x 8 x i64> %aext |
| } |
| |
| define <vscale x 4 x i64> @sload_4i8_4i64(ptr %a) { |
| ; CHECK-LABEL: sload_4i8_4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 4 x i8>, ptr %a |
| %aext = sext <vscale x 4 x i8> %aval to <vscale x 4 x i64> |
| ret <vscale x 4 x i64> %aext |
| } |
| |
| define <vscale x 4 x i64> @sload_4i16_4i64(ptr %a) { |
| ; CHECK-LABEL: sload_4i16_4i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sh { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 4 x i16>, ptr %a |
| %aext = sext <vscale x 4 x i16> %aval to <vscale x 4 x i64> |
| ret <vscale x 4 x i64> %aext |
| } |
| |
| define <vscale x 8 x i32> @sload_8i8_8i32(ptr %a) { |
| ; CHECK-LABEL: sload_8i8_8i32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.s |
| ; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 8 x i8>, ptr %a |
| %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i32> |
| ret <vscale x 8 x i32> %aext |
| } |
| |
| define <vscale x 8 x i64> @sload_8i8_8i64(ptr %a) { |
| ; CHECK-LABEL: sload_8i8_8i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: ptrue p0.d |
| ; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0] |
| ; CHECK-NEXT: ld1sb { z1.d }, p0/z, [x0, #1, mul vl] |
| ; CHECK-NEXT: ld1sb { z2.d }, p0/z, [x0, #2, mul vl] |
| ; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x0, #3, mul vl] |
| ; CHECK-NEXT: ret |
| %aval = load <vscale x 8 x i8>, ptr %a |
| %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i64> |
| ret <vscale x 8 x i64> %aext |
| } |