; blob: 93d9f974fdbd7ce7cda2cc6b52a576e032741c18
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
; Zero-extending load nxv2i8 -> nxv2i64: expected to fold into a single
; predicated ld1b into 64-bit elements, with no separate extend instruction.
define <vscale x 2 x i64> @zload_nxv2i8(ptr %src) {
; CHECK-LABEL: zload_nxv2i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src, align 1
  %ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}
; Zero-extending load nxv2i16 -> nxv2i64: folds to a single ld1h into
; 64-bit elements.
define <vscale x 2 x i64> @zload_nxv2i16(ptr %src) {
; CHECK-LABEL: zload_nxv2i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i16>, ptr %src, align 2
  %ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}
; Zero-extending load nxv2i32 -> nxv2i64: folds to a single ld1w into
; 64-bit elements.
define <vscale x 2 x i64> @zload_nxv2i32(ptr %src) {
; CHECK-LABEL: zload_nxv2i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i32>, ptr %src, align 4
  %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}
; Zero-extending load nxv4i8 -> nxv4i32: folds to a single ld1b into
; 32-bit elements.
define <vscale x 4 x i32> @zload_nxv4i8(ptr %src) {
; CHECK-LABEL: zload_nxv4i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 4 x i8>, ptr %src, align 1
  %ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}
; Zero-extending load nxv4i16 -> nxv4i32: folds to a single ld1h into
; 32-bit elements.
define <vscale x 4 x i32> @zload_nxv4i16(ptr %src) {
; CHECK-LABEL: zload_nxv4i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 4 x i16>, ptr %src, align 2
  %ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}
; Zero-extending load nxv8i8 -> nxv8i16: folds to a single ld1b into
; 16-bit elements.
define <vscale x 8 x i16> @zload_nxv8i8(ptr %src) {
; CHECK-LABEL: zload_nxv8i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 8 x i8>, ptr %src, align 1
  %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %ext
}
; Return type requires splitting
; nxv8i16 -> nxv8i64 is 4x wider than a single Z register in 64-bit
; elements, so the extending load is split into four ld1h loads at
; consecutive vector-length-scaled offsets ("mul vl").
define <vscale x 8 x i64> @zload_nxv8i16(ptr %a) {
; CHECK-LABEL: zload_nxv8i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1h { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT: ld1h { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT: ret
  %load = load <vscale x 8 x i16>, ptr %a, align 2
  %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %ext
}
; nxv8i8 -> nxv8i32 needs two Z registers of 32-bit elements, so the
; extending load is split into two ld1b loads.
define <vscale x 8 x i32> @zload_nxv8i8_nxv8i32(ptr %a) {
; CHECK-LABEL: zload_nxv8i8_nxv8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %load = load <vscale x 8 x i8>, ptr %a, align 1
  %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %ext
}
; load requires promotion
; The i16 load is promoted: the zext folds into ld1h (64-bit elements)
; and the uitofp lowers to a single ucvtf, with no intermediate i32 step
; visible in the output.
define <vscale x 2 x double> @zload_2i16_2f64(ptr noalias %in) {
; CHECK-LABEL: zload_2i16_2f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ucvtf z0.d, p0/m, z0.d
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i16>, ptr %in, align 2
  %zext = zext <vscale x 2 x i16> %load to <vscale x 2 x i32>
  %res = uitofp <vscale x 2 x i32> %zext to <vscale x 2 x double>
  ret <vscale x 2 x double> %res
}
; Extending loads from unpacked to wide illegal types
; Unpacked nxv4i8 source, illegal nxv4i64 destination: split into two
; extending ld1b loads rather than load + unpack/extend instructions.
define <vscale x 4 x i64> @zload_4i8_4i64(ptr %a) {
; CHECK-LABEL: zload_4i8_4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %aval = load <vscale x 4 x i8>, ptr %a, align 1
  %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %aext
}
; Unpacked nxv4i16 source, illegal nxv4i64 destination: split into two
; extending ld1h loads.
define <vscale x 4 x i64> @zload_4i16_4i64(ptr %a) {
; CHECK-LABEL: zload_4i16_4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %aval = load <vscale x 4 x i16>, ptr %a, align 2
  %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %aext
}
; Unpacked nxv8i8 source, illegal nxv8i32 destination: split into two
; extending ld1b loads into 32-bit elements.
define <vscale x 8 x i32> @zload_8i8_8i32(ptr %a) {
; CHECK-LABEL: zload_8i8_8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %aval = load <vscale x 8 x i8>, ptr %a, align 1
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %aext
}
; Unpacked nxv8i8 source, illegal nxv8i64 destination: split into four
; extending ld1b loads into 64-bit elements.
define <vscale x 8 x i64> @zload_8i8_8i64(ptr %a) {
; CHECK-LABEL: zload_8i8_8i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT: ret
  %aval = load <vscale x 8 x i8>, ptr %a, align 1
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %aext
}
; Two independent nxv4i8 extending loads combined with an add: each load
; is still split into two ld1b loads (no widening through the add).
define <vscale x 4 x i64> @zload_x2_4i8_4i64(ptr %a, ptr %b) {
; CHECK-LABEL: zload_x2_4i8_4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z2.d }, p0/z, [x1, #1, mul vl]
; CHECK-NEXT: ld1b { z3.d }, p0/z, [x1]
; CHECK-NEXT: add z1.d, z1.d, z2.d
; CHECK-NEXT: add z0.d, z0.d, z3.d
; CHECK-NEXT: ret
  %aval = load <vscale x 4 x i8>, ptr %a, align 1
  %bval = load <vscale x 4 x i8>, ptr %b, align 1
  %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
  %bext = zext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
  %res = add <vscale x 4 x i64> %aext, %bext
  ret <vscale x 4 x i64> %res
}
; As above, but with nxv4i16 sources: each operand becomes two extending
; ld1h loads feeding the vector adds.
define <vscale x 4 x i64> @zload_x2_4i16_4i64(ptr %a, ptr %b) {
; CHECK-LABEL: zload_x2_4i16_4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1h { z2.d }, p0/z, [x1, #1, mul vl]
; CHECK-NEXT: ld1h { z3.d }, p0/z, [x1]
; CHECK-NEXT: add z1.d, z1.d, z2.d
; CHECK-NEXT: add z0.d, z0.d, z3.d
; CHECK-NEXT: ret
  %aval = load <vscale x 4 x i16>, ptr %a, align 2
  %bval = load <vscale x 4 x i16>, ptr %b, align 2
  %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
  %bext = zext <vscale x 4 x i16> %bval to <vscale x 4 x i64>
  %res = add <vscale x 4 x i64> %aext, %bext
  ret <vscale x 4 x i64> %res
}
; Two nxv8i8 -> nxv8i32 extending loads plus add: each operand is split
; into two ld1b loads into 32-bit elements.
define <vscale x 8 x i32> @zload_x2_8i8_8i32(ptr %a, ptr %b) {
; CHECK-LABEL: zload_x2_8i8_8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1b { z2.s }, p0/z, [x1, #1, mul vl]
; CHECK-NEXT: ld1b { z3.s }, p0/z, [x1]
; CHECK-NEXT: add z1.s, z1.s, z2.s
; CHECK-NEXT: add z0.s, z0.s, z3.s
; CHECK-NEXT: ret
  %aval = load <vscale x 8 x i8>, ptr %a, align 1
  %bval = load <vscale x 8 x i8>, ptr %b, align 1
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
  %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i32>
  %res = add <vscale x 8 x i32> %aext, %bext
  ret <vscale x 8 x i32> %res
}
; Two nxv8i8 -> nxv8i64 extending loads plus add: each operand is split
; into four ld1b loads (eight loads total) feeding four vector adds.
define <vscale x 8 x i64> @zload_x2_8i8_8i64(ptr %a, ptr %b) {
; CHECK-LABEL: zload_x2_8i8_8i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z4.d }, p0/z, [x1]
; CHECK-NEXT: ld1b { z5.d }, p0/z, [x1, #3, mul vl]
; CHECK-NEXT: ld1b { z6.d }, p0/z, [x1, #2, mul vl]
; CHECK-NEXT: ld1b { z7.d }, p0/z, [x1, #1, mul vl]
; CHECK-NEXT: add z0.d, z0.d, z4.d
; CHECK-NEXT: add z3.d, z3.d, z5.d
; CHECK-NEXT: add z1.d, z1.d, z7.d
; CHECK-NEXT: add z2.d, z2.d, z6.d
; CHECK-NEXT: ret
  %aval = load <vscale x 8 x i8>, ptr %a, align 1
  %bval = load <vscale x 8 x i8>, ptr %b, align 1
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
  %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i64>
  %res = add <vscale x 8 x i64> %aext, %bext
  ret <vscale x 8 x i64> %res
}
; A freeze between the load and the zext should not block the fold into
; a single extending ld1b (the frozen value's only use is the zext).
define <vscale x 2 x i64> @load_frozen_before_zext(ptr %src) {
; CHECK-LABEL: load_frozen_before_zext:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src
  %load.frozen = freeze <vscale x 2 x i8> %load
  %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}
; Same freeze-then-zext pattern, but the nxv8i32 result requires splitting
; into two extending ld1b loads.
define <vscale x 8 x i32> @load_frozen_before_zext_needs_splitting(ptr %src) {
; CHECK-LABEL: load_frozen_before_zext_needs_splitting:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %load = load <vscale x 8 x i8>, ptr %src
  %load.frozen = freeze <vscale x 8 x i8> %load
  %ext = zext <vscale x 8 x i8> %load.frozen to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %ext
}
; A multi-use freeze in this example effectively means the load is also multi-use.
; The frozen value has a second use (the fake_use), so the zext cannot be
; folded away entirely: a single load is shared, and the extension is done
; with an explicit AND mask of 0xff.
define <vscale x 2 x i64> @load_frozen_before_zext_multiuse(ptr %src) {
; CHECK-LABEL: load_frozen_before_zext_multiuse:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: // fake_use: $z1
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src, align 1
  %load.frozen = freeze <vscale x 2 x i8> %load
  %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
  call void (...) @llvm.fake.use(<vscale x 2 x i8> %load.frozen)
  ret <vscale x 2 x i64> %ext
}
; In this example the freeze is used twice and the load used once.
; The frozen value feeds both a zext and a mul/sext chain: one load is
; shared, the zero-extension is an AND with 0xff, and the sign-extension
; is an sxtb after the mul.
define <vscale x 2 x i64> @load_frozen_before_zext_multiuse2(ptr %src) {
; CHECK-LABEL: load_frozen_before_zext_multiuse2:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: mov z1.d, z0.d
; CHECK-NEXT: mul z0.d, z0.d, #13
; CHECK-NEXT: and z1.d, z1.d, #0xff
; CHECK-NEXT: sxtb z0.d, p0/m, z0.d
; CHECK-NEXT: add z0.d, z1.d, z0.d
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src, align 1
  %load.frozen = freeze <vscale x 2 x i8> %load
  %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
  %mul = mul <vscale x 2 x i8> %load.frozen, splat (i8 13)
  %mul.ext = sext <vscale x 2 x i8> %mul to <vscale x 2 x i64>
  %res = add <vscale x 2 x i64> %ext, %mul.ext
  ret <vscale x 2 x i64> %res
}
; In this example the freeze is used 3 times and the load used twice via the chain.
; The frozen value is zero-extended once and sign-extended twice, with the
; products stored at narrower widths: expect one shared ld1b, one sxtb for
; the sign-extensions, an AND mask for the zext, and truncating stores
; (st1w / st1h).
define <vscale x 2 x i64> @load_frozen_before_sext_multiuse3(ptr %src, ptr %dst1, ptr %dst2) {
; CHECK-LABEL: load_frozen_before_sext_multiuse3:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: movprfx z1, z0
; CHECK-NEXT: sxtb z1.d, p0/m, z0.d
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: mov z2.d, z1.d
; CHECK-NEXT: mul z1.d, z1.d, #49
; CHECK-NEXT: mul z2.d, z2.d, #33
; CHECK-NEXT: add z0.d, z0.d, z1.d
; CHECK-NEXT: st1w { z2.d }, p0, [x1]
; CHECK-NEXT: st1h { z1.d }, p0, [x2]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src, align 1
  %load.frozen = freeze <vscale x 2 x i8> %load
  %zext64 = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
  %sext32 = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i32>
  %sext16 = sext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i16>
  %mul32 = mul <vscale x 2 x i32> %sext32, splat (i32 33)
  %mul16 = mul <vscale x 2 x i16> %sext16, splat (i16 49)
  store <vscale x 2 x i32> %mul32, ptr %dst1, align 4
  store <vscale x 2 x i16> %mul16, ptr %dst2, align 2
  %mul32.ext64 = sext <vscale x 2 x i16> %mul16 to <vscale x 2 x i64>
  %res = add <vscale x 2 x i64> %zext64, %mul32.ext64
  ret <vscale x 2 x i64> %res
}
; In this example the freeze is used twice - once for the sign-extend and
; once for the icmp (or SETCC)
; The frozen value feeds both the zext and an icmp/select chain: one load
; is shared, the zext becomes an AND with 0xff, the compare is a cmpeq
; against the immediate, and the select lowers to sel + truncating st1b.
define <vscale x 2 x i64> @load_frozen_before_zext_multiuse4(ptr %src1, ptr %src2, ptr %dst1) {
; CHECK-LABEL: load_frozen_before_zext_multiuse4:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov w8, #1234 // =0x4d2
; CHECK-NEXT: mov z3.d, #27 // =0x1b
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z2.d }, p0/z, [x1]
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, #3
; CHECK-NEXT: add z0.d, z0.d, z1.d
; CHECK-NEXT: sel z1.d, p1, z2.d, z3.d
; CHECK-NEXT: st1b { z1.d }, p0, [x2]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src1, align 1
  %load.frozen = freeze <vscale x 2 x i8> %load
  %zext64 = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
  %cmp = icmp eq <vscale x 2 x i8> %load.frozen, splat (i8 3)
  %load2 = load <vscale x 2 x i8>, ptr %src2, align 1
  %sel = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %load2, <vscale x 2 x i8> splat (i8 27)
  store <vscale x 2 x i8> %sel, ptr %dst1, align 1
  %res = add <vscale x 2 x i64> %zext64, splat (i64 1234)
  ret <vscale x 2 x i64> %res
}
; There is one use of the freeze, and multiple uses of the load via the chain.
; Legal nxv16i8 source, illegal nxv16i64 destination, plus an independent
; splat store through the chain: the extending load is split into eight
; ld1b loads, and the stores of the splat interleave with them.
define <vscale x 16 x i64> @load_frozen_before_zext_multiuse5_dst_illegal(ptr %src, ptr %dst) {
; CHECK-LABEL: load_frozen_before_zext_multiuse5_dst_illegal:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov z24.d, #3 // =0x3
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1b { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT: ld1b { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT: ld1b { z4.d }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT: ld1b { z5.d }, p0/z, [x0, #5, mul vl]
; CHECK-NEXT: ld1b { z6.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT: ld1b { z7.d }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT: str z24, [x1, #6, mul vl]
; CHECK-NEXT: str z24, [x1, #7, mul vl]
; CHECK-NEXT: str z24, [x1, #4, mul vl]
; CHECK-NEXT: str z24, [x1, #5, mul vl]
; CHECK-NEXT: str z24, [x1, #2, mul vl]
; CHECK-NEXT: str z24, [x1, #3, mul vl]
; CHECK-NEXT: str z24, [x1]
; CHECK-NEXT: str z24, [x1, #1, mul vl]
; CHECK-NEXT: ret
  %load = load <vscale x 16 x i8>, ptr %src, align 1
  %load.frozen = freeze <vscale x 16 x i8> %load
  %ext = zext <vscale x 16 x i8> %load.frozen to <vscale x 16 x i64>
  store <vscale x 16 x i64> splat (i64 3), ptr %dst, align 8
  ret <vscale x 16 x i64> %ext
}
; Unpacked nxv2i8 source with a legal nxv2i64 destination: the freeze plus
; extra chain use still allows the single extending ld1b.
define <vscale x 2 x i64> @load_frozen_before_zext_multiuse5_src_illegal(ptr %src, ptr %dst) {
; CHECK-LABEL: load_frozen_before_zext_multiuse5_src_illegal:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov z1.d, #3 // =0x3
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: str z1, [x1]
; CHECK-NEXT: ret
  %load = load <vscale x 2 x i8>, ptr %src, align 1
  %load.frozen = freeze <vscale x 2 x i8> %load
  %ext = zext <vscale x 2 x i8> %load.frozen to <vscale x 2 x i64>
  store <vscale x 2 x i64> splat (i64 3), ptr %dst, align 8
  ret <vscale x 2 x i64> %ext
}
; Both the nxv4i8 source and the nxv4i64 destination are illegal: the
; extending load splits into two ld1b loads, alongside the splat stores.
define <vscale x 4 x i64> @load_frozen_before_zext_multiuse5_both_illegal(ptr %src, ptr %dst) {
; CHECK-LABEL: load_frozen_before_zext_multiuse5_both_illegal:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov z2.d, #3 // =0x3
; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT: str z2, [x1, #1, mul vl]
; CHECK-NEXT: str z2, [x1]
; CHECK-NEXT: ret
  %load = load <vscale x 4 x i8>, ptr %src, align 1
  %load.frozen = freeze <vscale x 4 x i8> %load
  %ext = zext <vscale x 4 x i8> %load.frozen to <vscale x 4 x i64>
  store <vscale x 4 x i64> splat (i64 3), ptr %dst, align 8
  ret <vscale x 4 x i64> %ext
}