; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve2p1 < %s | FileCheck %s

; LD1W
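;
; ld1uwq loads one 32-bit element into each 128-bit vector segment, governed
; by a per-quadword predicate (<vscale x 1 x i1> for an nxv4i32 result); the
; "uw" in the name denotes an unsigned word zero-extended into the quadword.
; The _ss tests check the scalar-plus-scalar addressing form, with the index
; register scaled by the element size (lsl #2 for words); the _si tests check
; the scalar-plus-immediate form.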

define <vscale x 4 x i32> @test_svld1uwq_i32_ss(<vscale x 1 x i1> %pred, ptr %base, i64 %offset) {
; CHECK-LABEL: test_svld1uwq_i32_ss:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %gep = getelementptr i32, ptr %base, i64 %offset
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep)
  ret <vscale x 4 x i32> %res
}

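; The scalar-plus-immediate form encodes offsets in the range [-8, 7]
; (mul vl); the GEP indices -8 and 7 below probe both ends of that range.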
define <vscale x 4 x i32> @test_svld1uwq_i32_si(<vscale x 1 x i1> %pred, <vscale x 1 x i32>* %base) {
; CHECK-LABEL: test_svld1uwq_i32_si:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT:    ld1w { z1.q }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %gep1 = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 -8
  %res1 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep1)

  %gep2 = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 7
  %res2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep2)

  %res = add <vscale x 4 x i32> %res1, %res2
  ret <vscale x 4 x i32> %res
}

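; A GEP index of 8 falls just outside the [-8, 7] immediate window, so the
; address must be materialized separately: 8 * vscale * 4 bytes equals
; 2 * VL, hence addvl x8, x0, #2 followed by an unindexed load.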
define <vscale x 4 x i32> @test_svld1uwq_i32_out_of_bound(<vscale x 1 x i1> %pred, <vscale x 1 x i32>* %base) {
; CHECK-LABEL: test_svld1uwq_i32_out_of_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addvl x8, x0, #2
; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x8]
; CHECK-NEXT:    ret
  %gep = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 8
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @test_svld1uwq_f32_ss(<vscale x 1 x i1> %pred, ptr %base, i64 %offset) {
; CHECK-LABEL: test_svld1uwq_f32_ss:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    ret
  %gep = getelementptr float, ptr %base, i64 %offset
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1uwq.nxv4f32(<vscale x 1 x i1> %pred, ptr %gep)
  ret <vscale x 4 x float> %res
}

define <vscale x 4 x float> @test_svld1uwq_f32_si(<vscale x 1 x i1> %pred, <vscale x 1 x float>* %base) {
; CHECK-LABEL: test_svld1uwq_f32_si:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT:    ld1w { z1.q }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %gep1 = getelementptr inbounds <vscale x 1 x float>, <vscale x 1 x float>* %base, i64 -8
  %res1 = call <vscale x 4 x float> @llvm.aarch64.sve.ld1uwq.nxv4f32(<vscale x 1 x i1> %pred, ptr %gep1)

  %gep2 = getelementptr inbounds <vscale x 1 x float>, <vscale x 1 x float>* %base, i64 7
  %res2 = call <vscale x 4 x float> @llvm.aarch64.sve.ld1uwq.nxv4f32(<vscale x 1 x i1> %pred, ptr %gep2)

  %res = fadd <vscale x 4 x float> %res1, %res2
  ret <vscale x 4 x float> %res
}

; LD1D
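;
; The LD1D tests mirror the LD1W ones for ld1udq: one 64-bit element loaded
; into each 128-bit segment, zero-extended to the quadword, with the _ss form
; scaling the index by the element size (lsl #3).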

define <vscale x 2 x i64> @test_svld1udq_i64_ss(<vscale x 1 x i1> %pred, ptr %base, i64 %offset) {
; CHECK-LABEL: test_svld1udq_i64_ss:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x0, x1, lsl #3]
; CHECK-NEXT:    ret
  %gep = getelementptr i64, ptr %base, i64 %offset
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @test_svld1udq_i64_si(<vscale x 1 x i1> %pred, <vscale x 1 x i64>* %base) {
; CHECK-LABEL: test_svld1udq_i64_si:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT:    ld1d { z1.q }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %gep1 = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 -8
  %res1 = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep1)

  %gep2 = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 7
  %res2 = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep2)

  %res = add <vscale x 2 x i64> %res1, %res2
  ret <vscale x 2 x i64> %res
}

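; A GEP index of -10 is below the minimum immediate of -8: -10 * vscale * 8
; bytes equals -5 * VL, hence addvl x8, x0, #-5 followed by an unindexed load.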
define <vscale x 2 x i64> @test_svld1udq_i64_out_of_bound(<vscale x 1 x i1> %pred, <vscale x 1 x i64>* %base) {
; CHECK-LABEL: test_svld1udq_i64_out_of_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addvl x8, x0, #-5
; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x8]
; CHECK-NEXT:    ret
  %gep = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 -10
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @test_svld1udq_f64_ss(<vscale x 1 x i1> %pred, ptr %base, i64 %offset) {
; CHECK-LABEL: test_svld1udq_f64_ss:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x0, x1, lsl #3]
; CHECK-NEXT:    ret
  %gep = getelementptr double, ptr %base, i64 %offset
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1udq.nxv2f64(<vscale x 1 x i1> %pred, ptr %gep)
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x double> @test_svld1udq_f64_si(<vscale x 1 x i1> %pred, <vscale x 1 x double>* %base) {
; CHECK-LABEL: test_svld1udq_f64_si:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT:    ld1d { z1.q }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %gep1 = getelementptr inbounds <vscale x 1 x double>, <vscale x 1 x double>* %base, i64 -8
  %res1 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1udq.nxv2f64(<vscale x 1 x i1> %pred, ptr %gep1)

  %gep2 = getelementptr inbounds <vscale x 1 x double>, <vscale x 1 x double>* %base, i64 7
  %res2 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1udq.nxv2f64(<vscale x 1 x i1> %pred, ptr %gep2)

  %res = fadd <vscale x 2 x double> %res1, %res2
  ret <vscale x 2 x double> %res
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1>, ptr)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1uwq.nxv4f32(<vscale x 1 x i1>, ptr)

declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1>, ptr)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1udq.nxv2f64(<vscale x 1 x i1>, ptr)