; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
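
; The contiguous ST1 scalar+immediate addressing mode encodes a signed 4-bit
; vector index, so VL-scaled offsets in the range [-8, 7] fold directly into
; the instruction; anything outside that range must be materialised in a
; register. The tests below exercise both bounds, one step beyond each, and
; the truncating forms for narrower element containers.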

;
; ST1B
;

define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_upper_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 7
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base)
  ret void
}

define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 1
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base)
  ret void
}

define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_lower_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #-8, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -8
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base)
  ret void
}

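; Offsets outside [-8, 7] do not fit the immediate form; the offset is
; materialised with RDVL and the scalar+scalar addressing mode is used.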
define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_out_of_upper_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rdvl x8, #8
; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 8
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base)
  ret void
}

define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_out_of_lower_bound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rdvl x8, #-9
; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -9
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base)
  ret void
}

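; Truncating forms: the data is truncated to the element size of the memory
; access, and the "#imm, mul vl" offset still scales by the size of the
; block actually transferred.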
define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_s_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.s }, p0, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, ptr %base)
  ret void
}

define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_h_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.h }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 8 x i8>, ptr %a, i64 1
  %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, ptr %base)
  ret void
}

define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1b_d_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.d }, p0, [x0, #-7, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 -7
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, ptr %base)
  ret void
}

;
; ST1H
;

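; ST1H, ST1W and ST1D accept the same [-8, 7] immediate range; "mul vl"
; always scales by the size of the block transferred per vector length.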
define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1h_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-1, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 8 x i16>, ptr %a, i64 -1
  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %base)
  ret void
}

define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1h_f16_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-5, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 8 x half>, ptr %a, i64 -5
  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, ptr %base)
  ret void
}

define void @st1h_bf16_inbound(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, ptr %a) #0 {
; CHECK-LABEL: st1h_bf16_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-5, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 8 x bfloat>, ptr %a, i64 -5
  call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, ptr %base)
  ret void
}

define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1h_s_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 2
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, ptr %base)
  ret void
}

define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1h_d_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, #-4, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 -4
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, ptr %base)
  ret void
}

;
; ST1W
;

define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1w_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #6, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 6
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base)
  ret void
}

define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1w_f32_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #-1, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 4 x float>, ptr %a, i64 -1
  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base)
  ret void
}

define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1w_d_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 1
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, ptr %base)
  ret void
}

;
; ST1D
;

define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1d_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 5
  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base)
  ret void
}

define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %a) {
; CHECK-LABEL: st1d_f64_inbound:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #-8, mul vl]
; CHECK-NEXT:    ret
  %base = getelementptr <vscale x 2 x double>, ptr %a, i64 -8
  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base)
  ret void
}

declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)

declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)

declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)

declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)

; +bf16 is required for the bfloat version.
attributes #0 = { "target-features"="+bf16" }