; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefixes=SI %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tonga < %s | FileCheck -check-prefixes=VI %s

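; Tests extraction of i8 elements from <N x i8> vectors of various widths,
; with both constant and dynamic indices, and narrowing of partially used
; vector loads, for the SI and VI run lines.
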
define amdgpu_kernel void @extract_vector_elt_v1i8(ptr addrspace(1) %out, <1 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v1i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_load_dword s2, s[8:9], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v1i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_load_dword s2, s[8:9], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_endpgm
%p0 = extractelement <1 x i8> %foo, i32 0
store i8 %p0, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @extract_vector_elt_v2i8(ptr addrspace(1) %out, <2 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v2i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_load_dword s2, s[8:9], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s3, s2, 8
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: s_add_u32 s0, s0, 1
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v2i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x8
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s3, s2, 8
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_add_u32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <2 x i8> %foo, i32 0
%p1 = extractelement <2 x i8> %foo, i32 1
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p1, ptr addrspace(1) %out
store volatile i8 %p0, ptr addrspace(1) %out1
ret void
}

define amdgpu_kernel void @extract_vector_elt_v3i8(ptr addrspace(1) %out, <3 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v3i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_load_dword s2, s[8:9], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s3, s2, 16
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: s_add_u32 s0, s0, 1
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v3i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x8
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s3, s2, 16
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_add_u32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <3 x i8> %foo, i32 0
%p1 = extractelement <3 x i8> %foo, i32 2
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p1, ptr addrspace(1) %out
store volatile i8 %p0, ptr addrspace(1) %out1
ret void
}

define amdgpu_kernel void @extract_vector_elt_v4i8(ptr addrspace(1) %out, <4 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v4i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_load_dword s2, s[8:9], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s3, s2, 16
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: s_add_u32 s0, s0, 1
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v4i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x8
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s3, s2, 16
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_add_u32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <4 x i8> %foo, i32 0
%p1 = extractelement <4 x i8> %foo, i32 2
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p1, ptr addrspace(1) %out
store volatile i8 %p0, ptr addrspace(1) %out1
ret void
}

define amdgpu_kernel void @extract_vector_elt_v8i8(<8 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v8i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[8:9], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 16
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_mov_b32_e32 v2, s0
; SI-NEXT: v_mov_b32_e32 v3, s1
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v8i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[8:9], 0x0
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_byte v[0:1], v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <8 x i8> %foo, i32 0
%p1 = extractelement <8 x i8> %foo, i32 2
store volatile i8 %p1, ptr addrspace(1) null
store volatile i8 %p0, ptr addrspace(1) null
ret void
}

define amdgpu_kernel void @extract_vector_elt_v16i8(ptr addrspace(1) %out, <16 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v16i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_load_dword s2, s[8:9], 0x4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s3, s2, 16
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: s_add_u32 s0, s0, 1
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v16i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x10
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s3, s2, 16
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_add_u32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <16 x i8> %foo, i32 0
%p1 = extractelement <16 x i8> %foo, i32 2
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p1, ptr addrspace(1) %out
store volatile i8 %p0, ptr addrspace(1) %out1
ret void
}

define amdgpu_kernel void @extract_vector_elt_v32i8(<32 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v32i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[8:9], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 16
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_mov_b32_e32 v2, s0
; SI-NEXT: v_mov_b32_e32 v3, s1
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v32i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[8:9], 0x0
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_byte v[0:1], v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <32 x i8> %foo, i32 0
%p1 = extractelement <32 x i8> %foo, i32 2
store volatile i8 %p1, ptr addrspace(1) null
store volatile i8 %p0, ptr addrspace(1) null
ret void
}

define amdgpu_kernel void @extract_vector_elt_v64i8(ptr addrspace(1) %out, <64 x i8> %foo) #0 {
; SI-LABEL: extract_vector_elt_v64i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_load_dword s2, s[8:9], 0x10
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s3, s2, 16
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: s_add_u32 s0, s0, 1
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: flat_store_byte v[0:1], v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: extract_vector_elt_v64i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x40
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s3, s2, 16
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_add_u32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <64 x i8> %foo, i32 0
%p1 = extractelement <64 x i8> %foo, i32 2
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p1, ptr addrspace(1) %out
store volatile i8 %p0, ptr addrspace(1) %out1
ret void
}

; FIXME: SI generates much worse code for this, which is a pain to match.

; FIXME: 16-bit and 32-bit shifts are not combined after legalization due to
; isTypeDesirableForOp in SimplifyDemandedBits.

define amdgpu_kernel void @dynamic_extract_vector_elt_v2i8(ptr addrspace(1) %out, [8 x i32], <2 x i8> %foo, [8 x i32], i32 %idx) #0 {
; SI-LABEL: dynamic_extract_vector_elt_v2i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s2, s[8:9], 0xa
; SI-NEXT: s_load_dword s3, s[8:9], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s2, s2, 0xffff
; SI-NEXT: s_lshl_b32 s3, s3, 3
; SI-NEXT: s_lshr_b32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_extract_vector_elt_v2i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x4c
; VI-NEXT: s_load_dword s3, s[8:9], 0x28
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b32 s2, s2, 3
; VI-NEXT: s_and_b32 s3, s3, 0xffff
; VI-NEXT: s_lshr_b32 s2, s3, s2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%elt = extractelement <2 x i8> %foo, i32 %idx
store volatile i8 %elt, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @dynamic_extract_vector_elt_v3i8(ptr addrspace(1) %out, [8 x i32], <3 x i8> %foo, [8 x i32], i32 %idx) #0 {
; SI-LABEL: dynamic_extract_vector_elt_v3i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s2, s[8:9], 0x13
; SI-NEXT: s_load_dword s3, s[8:9], 0xa
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b32 s2, s2, 3
; SI-NEXT: s_lshr_b32 s2, s3, s2
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_extract_vector_elt_v3i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[8:9], 0x4c
; VI-NEXT: s_load_dword s3, s[8:9], 0x28
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b32 s2, s2, 3
; VI-NEXT: s_lshr_b32 s2, s3, s2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%p0 = extractelement <3 x i8> %foo, i32 %idx
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p0, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @dynamic_extract_vector_elt_v4i8(ptr addrspace(1) %out, ptr addrspace(4) %vec.ptr, [8 x i32], i32 %idx) #0 {
; SI-LABEL: dynamic_extract_vector_elt_v4i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; SI-NEXT: s_load_dword s4, s[8:9], 0xc
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[2:3], 0x0
; SI-NEXT: s_lshl_b32 s3, s4, 3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_extract_vector_elt_v4i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_load_dword s4, s[8:9], 0x30
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s2, s[2:3], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_lshl_b32 s0, s4, 3
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s0, s2, s0
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%vec = load <4 x i8>, ptr addrspace(4) %vec.ptr
%p0 = extractelement <4 x i8> %vec, i32 %idx
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p0, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @dynamic_extract_vector_elt_v8i8(ptr addrspace(1) %out, ptr addrspace(4) %vec.ptr, i32 %idx) #0 {
; SI-LABEL: dynamic_extract_vector_elt_v8i8:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; SI-NEXT: s_load_dword s4, s[8:9], 0x4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
; SI-NEXT: s_lshl_b32 s4, s4, 3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b64 s[2:3], s[2:3], s4
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_byte v[0:1], v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_extract_vector_elt_v8i8:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_load_dword s4, s[8:9], 0x10
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_lshl_b32 s0, s4, 3
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b64 s[0:1], s[2:3], s0
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%vec = load <8 x i8>, ptr addrspace(4) %vec.ptr
%p0 = extractelement <8 x i8> %vec, i32 %idx
%out1 = getelementptr i8, ptr addrspace(1) %out, i32 1
store volatile i8 %p0, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @reduce_load_vector_v8i8_extract_0123() #0 {
; SI-LABEL: reduce_load_vector_v8i8_extract_0123:
; SI: ; %bb.0:
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_load_dword s0, s[0:1], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 8
; SI-NEXT: s_lshr_b32 s2, s0, 16
; SI-NEXT: s_lshr_b32 s3, s0, 24
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s3
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: reduce_load_vector_v8i8_extract_0123:
; VI: ; %bb.0:
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 8
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_lshr_b32 s2, s0, 16
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: s_lshr_b32 s3, s0, 24
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%load = load <8 x i8>, ptr addrspace(4) null
%elt0 = extractelement <8 x i8> %load, i32 0
%elt1 = extractelement <8 x i8> %load, i32 1
%elt2 = extractelement <8 x i8> %load, i32 2
%elt3 = extractelement <8 x i8> %load, i32 3
store volatile i8 %elt0, ptr addrspace(1) undef, align 1
store volatile i8 %elt1, ptr addrspace(1) undef, align 1
store volatile i8 %elt2, ptr addrspace(1) undef, align 1
store volatile i8 %elt3, ptr addrspace(1) undef, align 1
ret void
}

define amdgpu_kernel void @reduce_load_vector_v8i8_extract_0145() #0 {
; SI-LABEL: reduce_load_vector_v8i8_extract_0145:
; SI: ; %bb.0:
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s0, 8
; SI-NEXT: s_lshr_b32 s3, s1, 8
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: v_mov_b32_e32 v1, s2
; SI-NEXT: flat_store_byte v[0:1], v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s3
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: reduce_load_vector_v8i8_extract_0145:
; VI: ; %bb.0:
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s2, s0, 8
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_lshr_b32 s3, s1, 8
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: flat_store_byte v[0:1], v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%load = load <8 x i8>, ptr addrspace(4) null
%elt0 = extractelement <8 x i8> %load, i32 0
%elt1 = extractelement <8 x i8> %load, i32 1
%elt4 = extractelement <8 x i8> %load, i32 4
%elt5 = extractelement <8 x i8> %load, i32 5
store volatile i8 %elt0, ptr addrspace(1) undef, align 1
store volatile i8 %elt1, ptr addrspace(1) undef, align 1
store volatile i8 %elt4, ptr addrspace(1) undef, align 1
store volatile i8 %elt5, ptr addrspace(1) undef, align 1
ret void
}

define amdgpu_kernel void @reduce_load_vector_v8i8_extract_45() #0 {
; SI-LABEL: reduce_load_vector_v8i8_extract_45:
; SI: ; %bb.0:
; SI-NEXT: s_mov_b64 s[0:1], 4
; SI-NEXT: s_load_dword s0, s[0:1], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 8
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: reduce_load_vector_v8i8_extract_45:
; VI: ; %bb.0:
; VI-NEXT: s_mov_b64 s[0:1], 4
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 8
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%load = load <8 x i8>, ptr addrspace(4) null
%elt4 = extractelement <8 x i8> %load, i32 4
%elt5 = extractelement <8 x i8> %load, i32 5
store volatile i8 %elt4, ptr addrspace(1) undef, align 1
store volatile i8 %elt5, ptr addrspace(1) undef, align 1
ret void
}

; FIXME: ought to be able to eliminate the high half of the load
define amdgpu_kernel void @reduce_load_vector_v16i8_extract_0145() #0 {
; SI-LABEL: reduce_load_vector_v16i8_extract_0145:
; SI: ; %bb.0:
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s0, 8
; SI-NEXT: s_lshr_b32 s3, s1, 8
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: v_mov_b32_e32 v1, s2
; SI-NEXT: flat_store_byte v[0:1], v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s3
; SI-NEXT: flat_store_byte v[0:1], v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
; VI-LABEL: reduce_load_vector_v16i8_extract_0145:
; VI: ; %bb.0:
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s2, s0, 8
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_lshr_b32 s3, s1, 8
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: flat_store_byte v[0:1], v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: flat_store_byte v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
%load = load <16 x i8>, ptr addrspace(4) null
%elt0 = extractelement <16 x i8> %load, i32 0
%elt1 = extractelement <16 x i8> %load, i32 1
%elt4 = extractelement <16 x i8> %load, i32 4
%elt5 = extractelement <16 x i8> %load, i32 5
store volatile i8 %elt0, ptr addrspace(1) undef, align 1
store volatile i8 %elt1, ptr addrspace(1) undef, align 1
store volatile i8 %elt4, ptr addrspace(1) undef, align 1
store volatile i8 %elt5, ptr addrspace(1) undef, align 1
ret void
}

attributes #0 = { nounwind }