; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s

; A sign-extended i32 index into an addrspace(7) buffer fat pointer, scaled
; for an i16 element, should fold into a single v_lshl_add_u32 (shift 1).
define amdgpu_kernel void @lshl1_add(ptr addrspace(1) %in, ptr addrspace(7) %in2) {
; CHECK-LABEL: lshl1_add:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_clause 0x1
; CHECK-NEXT:    s_load_b64 s[6:7], s[4:5], 0x24
; CHECK-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    global_load_b32 v1, v0, s[6:7] scope:SCOPE_SYS
; CHECK-NEXT:    s_wait_loadcnt 0x0
; CHECK-NEXT:    s_load_b32 s7, s[4:5], 0x54
; CHECK-NEXT:    s_mov_b32 s4, s1
; CHECK-NEXT:    s_mov_b32 s5, s2
; CHECK-NEXT:    s_mov_b32 s6, s3
; CHECK-NEXT:    v_lshl_add_u32 v1, v1, 1, s0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    buffer_store_b16 v0, v1, s[4:7], null offen
; CHECK-NEXT:    s_endpgm
  %vaddr = load volatile i32, ptr addrspace(1) %in, align 4
  %1 = sext i32 %vaddr to i64
  %gep = getelementptr i16, ptr addrspace(7) %in2, i64 %1
  store i16 0, ptr addrspace(7) %gep, align 2
  ret void
}
; Same pattern with an i32 element type: the index scaling (shift 2) folds
; into v_lshl_add_u32 before the buffer_store_b32 through the fat pointer.
define amdgpu_kernel void @lshl2_add(ptr addrspace(1) %in, ptr addrspace(7) %in2) {
; CHECK-LABEL: lshl2_add:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_clause 0x1
; CHECK-NEXT:    s_load_b64 s[6:7], s[4:5], 0x24
; CHECK-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    global_load_b32 v1, v0, s[6:7] scope:SCOPE_SYS
; CHECK-NEXT:    s_wait_loadcnt 0x0
; CHECK-NEXT:    s_load_b32 s7, s[4:5], 0x54
; CHECK-NEXT:    s_mov_b32 s4, s1
; CHECK-NEXT:    s_mov_b32 s5, s2
; CHECK-NEXT:    s_mov_b32 s6, s3
; CHECK-NEXT:    v_lshl_add_u32 v1, v1, 2, s0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    buffer_store_b32 v0, v1, s[4:7], null offen
; CHECK-NEXT:    s_endpgm
  %vaddr = load volatile i32, ptr addrspace(1) %in, align 4
  %1 = sext i32 %vaddr to i64
  %gep = getelementptr i32, ptr addrspace(7) %in2, i64 %1
  store i32 0, ptr addrspace(7) %gep, align 4
  ret void
}
; i64 element type: index scaling (shift 3) folds into v_lshl_add_u32 and the
; zero store is emitted as a 64-bit buffer_store_b64 through the fat pointer.
define amdgpu_kernel void @lshl3_add(ptr addrspace(1) %in, ptr addrspace(7) %in2) {
; CHECK-LABEL: lshl3_add:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_clause 0x1
; CHECK-NEXT:    s_load_b64 s[6:7], s[4:5], 0x24
; CHECK-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44
; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    global_load_b32 v2, v0, s[6:7] scope:SCOPE_SYS
; CHECK-NEXT:    s_wait_loadcnt 0x0
; CHECK-NEXT:    s_load_b32 s7, s[4:5], 0x54
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    s_mov_b32 s4, s1
; CHECK-NEXT:    s_mov_b32 s5, s2
; CHECK-NEXT:    s_mov_b32 s6, s3
; CHECK-NEXT:    v_lshl_add_u32 v2, v2, 3, s0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    buffer_store_b64 v[0:1], v2, s[4:7], null offen
; CHECK-NEXT:    s_endpgm
  %vaddr = load volatile i32, ptr addrspace(1) %in, align 4
  %1 = sext i32 %vaddr to i64
  %gep = getelementptr i64, ptr addrspace(7) %in2, i64 %1
  store i64 0, ptr addrspace(7) %gep, align 8
  ret void
}
; i128 element type: index scaling (shift 4) folds into v_lshl_add_u32 and the
; zero store becomes a 128-bit buffer_store_b128 through the fat pointer.
define amdgpu_kernel void @lshl4_add(ptr addrspace(1) %in, ptr addrspace(7) %in2) {
; CHECK-LABEL: lshl4_add:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_clause 0x1
; CHECK-NEXT:    s_load_b64 s[6:7], s[4:5], 0x24
; CHECK-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT:    v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v2, v0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    global_load_b32 v3, v0, s[6:7] scope:SCOPE_SYS
; CHECK-NEXT:    s_wait_loadcnt 0x0
; CHECK-NEXT:    s_load_b32 s7, s[4:5], 0x54
; CHECK-NEXT:    s_mov_b32 s4, s1
; CHECK-NEXT:    s_mov_b32 s5, s2
; CHECK-NEXT:    s_mov_b32 s6, s3
; CHECK-NEXT:    v_lshl_add_u32 v4, v3, 4, s0
; CHECK-NEXT:    v_mov_b32_e32 v3, v0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    buffer_store_b128 v[0:3], v4, s[4:7], null offen
; CHECK-NEXT:    s_endpgm
  %vaddr = load volatile i32, ptr addrspace(1) %in, align 4
  %1 = sext i32 %vaddr to i64
  %gep = getelementptr i128, ptr addrspace(7) %in2, i64 %1
  store i128 0, ptr addrspace(7) %gep, align 16
  ret void
}