; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -earlycse-debug-hash -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

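; Pattern 1: max(a, b) - min(a, b) + c, written as a ugt/ule select pair,
; should be matched to a single v_sad_u32.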
define amdgpu_kernel void @v_sad_u32_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_sad_u32 v2, s0, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %t0 = select i1 %icmp0, i32 %a, i32 %b

  %icmp1 = icmp ule i32 %a, %b
  %t1 = select i1 %icmp1, i32 %a, i32 %b

  %ret0 = sub i32 %t0, %t1
  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

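; Same pattern with constants: 90 (0x5a) is materialized in a VGPR and the
; +20 becomes the inline third operand of v_sad_u32.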
define amdgpu_kernel void @v_sad_u32_constant_pat1(ptr addrspace(1) %out, i32 %a) {
; GCN-LABEL: v_sad_u32_constant_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s2, s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GCN-NEXT: v_mov_b32_e32 v0, 0x5a
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_sad_u32 v2, s2, v0, 20
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, 90
  %t0 = select i1 %icmp0, i32 %a, i32 90

  %icmp1 = icmp ule i32 %a, 90
  %t1 = select i1 %icmp1, i32 %a, i32 90

  %ret0 = sub i32 %t0, %t1
  %ret = add i32 %ret0, 20

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

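; Pattern 2: select(a > b, a - b, b - a) + c should also become v_sad_u32.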
define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_sad_u32 v2, s0, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %sub0 = sub i32 %a, %b
  %sub1 = sub i32 %b, %a
  %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1

  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

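; The sub has a second (volatile) use, so no v_sad_u32 is formed; a scalar
; min/max/sub/add sequence is emitted instead.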
define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_multi_use_sub_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 s[18:19], s[2:3]
; GCN-NEXT: s_mov_b64 s[16:17], s[0:1]
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_u32 s16, s16, s15
; GCN-NEXT: s_addc_u32 s17, s17, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_min_u32 s3, s0, s1
; GCN-NEXT: s_max_u32 s0, s0, s1
; GCN-NEXT: s_sub_i32 s0, s0, s3
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: s_add_i32 s0, s0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: buffer_store_dword v2, v0, s[16:19], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %t0 = select i1 %icmp0, i32 %a, i32 %b

  %icmp1 = icmp ule i32 %a, %b
  %t1 = select i1 %icmp1, i32 %a, i32 %b

  %ret0 = sub i32 %t0, %t1
  store volatile i32 %ret0, ptr addrspace(5) undef
  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

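; An extra use of the final add does not block the fold: v_sad_u32 is still
; formed and its result is stored twice.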
define amdgpu_kernel void @v_sad_u32_multi_use_add_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_multi_use_add_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 s[18:19], s[2:3]
; GCN-NEXT: s_mov_b64 s[16:17], s[0:1]
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_u32 s16, s16, s15
; GCN-NEXT: s_addc_u32 s17, s17, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s1
; GCN-NEXT: v_mov_b32_e32 v3, s2
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: v_sad_u32 v2, s0, v2, v3
; GCN-NEXT: buffer_store_dword v2, v0, s[16:19], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %t0 = select i1 %icmp0, i32 %a, i32 %b

  %icmp1 = icmp ule i32 %a, %b
  %t1 = select i1 %icmp1, i32 %a, i32 %b

  %ret0 = sub i32 %t0, %t1
  %ret = add i32 %ret0, %c
  store volatile i32 %ret, ptr addrspace(5) undef
  store i32 %ret, ptr addrspace(1) %out
  ret void
}

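; An extra use of the max still allows the fold; the max is computed
; separately with s_max_u32 for the volatile store.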
define amdgpu_kernel void @v_sad_u32_multi_use_max_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_multi_use_max_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 s[18:19], s[2:3]
; GCN-NEXT: s_mov_b64 s[16:17], s[0:1]
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_u32 s16, s16, s15
; GCN-NEXT: s_addc_u32 s17, s17, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_max_u32 s3, s0, s1
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_mov_b32_e32 v2, s3
; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
; GCN-NEXT: buffer_store_dword v2, v0, s[16:19], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: flat_store_dword v[0:1], v3
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %t0 = select i1 %icmp0, i32 %a, i32 %b
  store volatile i32 %t0, ptr addrspace(5) undef

  %icmp1 = icmp ule i32 %a, %b
  %t1 = select i1 %icmp1, i32 %a, i32 %b

  %ret0 = sub i32 %t0, %t1
  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

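; Same as above with an extra use of the min (s_min_u32).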
define amdgpu_kernel void @v_sad_u32_multi_use_min_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_multi_use_min_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 s[18:19], s[2:3]
; GCN-NEXT: s_mov_b64 s[16:17], s[0:1]
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_u32 s16, s16, s15
; GCN-NEXT: s_addc_u32 s17, s17, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_min_u32 s3, s0, s1
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_mov_b32_e32 v2, s3
; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
; GCN-NEXT: buffer_store_dword v2, v0, s[16:19], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: flat_store_dword v[0:1], v3
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %t0 = select i1 %icmp0, i32 %a, i32 %b

  %icmp1 = icmp ule i32 %a, %b
  %t1 = select i1 %icmp1, i32 %a, i32 %b

  store volatile i32 %t1, ptr addrspace(5) undef

  %ret0 = sub i32 %t0, %t1
  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

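; Pattern 2 with an extra use of one sub: v_sad_u32 is still formed and the
; a - b difference is computed separately with s_sub_i32.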
define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_multi_use_sub_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 s[18:19], s[2:3]
; GCN-NEXT: s_mov_b64 s[16:17], s[0:1]
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_u32 s16, s16, s15
; GCN-NEXT: s_addc_u32 s17, s17, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_sub_i32 s3, s0, s1
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_mov_b32_e32 v2, s3
; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
; GCN-NEXT: buffer_store_dword v2, v0, s[16:19], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: flat_store_dword v[0:1], v3
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %sub0 = sub i32 %a, %b
  store volatile i32 %sub0, ptr addrspace(5) undef
  %sub1 = sub i32 %b, %a
  %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1

  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

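; A second use of the select (the absolute difference itself) blocks the
; fold, as in the multi_use_sub_pat1 case.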
define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: v_sad_u32_multi_use_select_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 s[18:19], s[2:3]
; GCN-NEXT: s_mov_b64 s[16:17], s[0:1]
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_add_u32 s16, s16, s15
; GCN-NEXT: s_addc_u32 s17, s17, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_min_u32 s3, s0, s1
; GCN-NEXT: s_max_u32 s0, s0, s1
; GCN-NEXT: s_sub_i32 s0, s0, s3
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: s_add_i32 s0, s0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: buffer_store_dword v2, v0, s[16:19], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %sub0 = sub i32 %a, %b
  %sub1 = sub i32 %b, %a
  %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1
  store volatile i32 %ret0, ptr addrspace(5) undef

  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

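; The <4 x i32> version is scalarized into four independent v_sad_u32s.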
define amdgpu_kernel void @v_sad_u32_vector_pat1(ptr addrspace(1) %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; GCN-LABEL: v_sad_u32_vector_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx8 s[0:7], s[8:9], 0x4
; GCN-NEXT: s_load_dwordx4 s[12:15], s[8:9], 0xc
; GCN-NEXT: s_load_dwordx2 s[8:9], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s7
; GCN-NEXT: v_mov_b32_e32 v1, s15
; GCN-NEXT: v_mov_b32_e32 v2, s6
; GCN-NEXT: v_sad_u32 v3, s3, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s14
; GCN-NEXT: v_sad_u32 v2, s2, v2, v0
; GCN-NEXT: v_mov_b32_e32 v0, s5
; GCN-NEXT: v_mov_b32_e32 v1, s13
; GCN-NEXT: v_sad_u32 v1, s1, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v4, s12
; GCN-NEXT: v_sad_u32 v0, s0, v0, v4
; GCN-NEXT: v_mov_b32_e32 v4, s8
; GCN-NEXT: v_mov_b32_e32 v5, s9
; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt <4 x i32> %a, %b
  %t0 = select <4 x i1> %icmp0, <4 x i32> %a, <4 x i32> %b

  %icmp1 = icmp ule <4 x i32> %a, %b
  %t1 = select <4 x i1> %icmp1, <4 x i32> %a, <4 x i32> %b

  %ret0 = sub <4 x i32> %t0, %t1
  %ret = add <4 x i32> %ret0, %c

  store <4 x i32> %ret, ptr addrspace(1) %out
  ret void
}

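; Pattern 2 on <4 x i32> produces the same four v_sad_u32s.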
define amdgpu_kernel void @v_sad_u32_vector_pat2(ptr addrspace(1) %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; GCN-LABEL: v_sad_u32_vector_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx8 s[0:7], s[8:9], 0x4
; GCN-NEXT: s_load_dwordx4 s[12:15], s[8:9], 0xc
; GCN-NEXT: s_load_dwordx2 s[8:9], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s7
; GCN-NEXT: v_mov_b32_e32 v1, s15
; GCN-NEXT: v_mov_b32_e32 v2, s6
; GCN-NEXT: v_sad_u32 v3, s3, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s14
; GCN-NEXT: v_sad_u32 v2, s2, v2, v0
; GCN-NEXT: v_mov_b32_e32 v0, s5
; GCN-NEXT: v_mov_b32_e32 v1, s13
; GCN-NEXT: v_sad_u32 v1, s1, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v4, s12
; GCN-NEXT: v_sad_u32 v0, s0, v0, v4
; GCN-NEXT: v_mov_b32_e32 v4, s8
; GCN-NEXT: v_mov_b32_e32 v5, s9
; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt <4 x i32> %a, %b
  %sub0 = sub <4 x i32> %a, %b
  %sub1 = sub <4 x i32> %b, %a
  %ret0 = select <4 x i1> %icmp0, <4 x i32> %sub0, <4 x i32> %sub1

  %ret = add <4 x i32> %ret0, %c

  store <4 x i32> %ret, ptr addrspace(1) %out
  ret void
}

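; For i16, the operands are zero-extended out of the packed kernel arguments
; (s_and_b32/s_lshr_b32) and the 32-bit v_sad_u32 is used.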
define amdgpu_kernel void @v_sad_u32_i16_pat1(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
; GCN-LABEL: v_sad_u32_i16_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s4, s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s4, s4, 0xffff
; GCN-NEXT: s_lshr_b32 s0, s0, 16
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s0
; GCN-NEXT: v_sad_u32 v2, s4, v1, v0
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: flat_store_short v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i16 %a, %b
  %t0 = select i1 %icmp0, i16 %a, i16 %b

  %icmp1 = icmp ule i16 %a, %b
  %t1 = select i1 %icmp1, i16 %a, i16 %b

  %ret0 = sub i16 %t0, %t1
  %ret = add i16 %ret0, %c

  store i16 %ret, ptr addrspace(1) %out
  ret void
}

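; i16 pattern 2 with operands loaded from memory: the zero-extending
; flat_load_ushorts feed v_sad_u32 directly.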
define amdgpu_kernel void @v_sad_u32_i16_pat2(ptr addrspace(1) %out) {
; GCN-LABEL: v_sad_u32_i16_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: flat_load_ushort v0, v[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GCN-NEXT: flat_load_ushort v1, v[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_load_ushort v2, v[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_sad_u32 v2, v0, v1, v2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_store_short v[0:1], v2
; GCN-NEXT: s_endpgm
  %a = load volatile i16, ptr addrspace(1) undef
  %b = load volatile i16, ptr addrspace(1) undef
  %c = load volatile i16, ptr addrspace(1) undef
  %icmp0 = icmp ugt i16 %a, %b
  %sub0 = sub i16 %a, %b
  %sub1 = sub i16 %b, %a
  %ret0 = select i1 %icmp0, i16 %sub0, i16 %sub1

  %ret = add i16 %ret0, %c

  store i16 %ret, ptr addrspace(1) %out
  ret void
}

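; For i8, the three operands are unpacked from one dword
; (s_and_b32/s_bfe_u32/s_lshr_b32) before v_sad_u32.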
define amdgpu_kernel void @v_sad_u32_i8_pat1(ptr addrspace(1) %out, i8 %a, i8 %b, i8 %c) {
; GCN-LABEL: v_sad_u32_i8_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s2, s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s3, s2, 0xff
; GCN-NEXT: s_bfe_u32 s4, s2, 0x80008
; GCN-NEXT: s_lshr_b32 s2, s2, 16
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_sad_u32 v2, s3, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_store_byte v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i8 %a, %b
  %t0 = select i1 %icmp0, i8 %a, i8 %b

  %icmp1 = icmp ule i8 %a, %b
  %t1 = select i1 %icmp1, i8 %a, i8 %b

  %ret0 = sub i8 %t0, %t1
  %ret = add i8 %ret0, %c

  store i8 %ret, ptr addrspace(1) %out
  ret void
}

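; i8 pattern 2 with loaded operands, analogous to the i16 case above.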
define amdgpu_kernel void @v_sad_u32_i8_pat2(ptr addrspace(1) %out) {
; GCN-LABEL: v_sad_u32_i8_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: flat_load_ubyte v0, v[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GCN-NEXT: flat_load_ubyte v1, v[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_load_ubyte v2, v[0:1] glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_sad_u32 v2, v0, v1, v2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_store_byte v[0:1], v2
; GCN-NEXT: s_endpgm
  %a = load volatile i8, ptr addrspace(1) undef
  %b = load volatile i8, ptr addrspace(1) undef
  %c = load volatile i8, ptr addrspace(1) undef
  %icmp0 = icmp ugt i8 %a, %b
  %sub0 = sub i8 %a, %b
  %sub1 = sub i8 %b, %a
  %ret0 = select i1 %icmp0, i8 %sub0, i8 %sub1

  %ret = add i8 %ret0, %c

  store i8 %ret, ptr addrspace(1) %out
  ret void
}

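; Same i8 pattern with zeroext scalar arguments; v_sad_u32 is still formed.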
define amdgpu_kernel void @s_sad_u32_i8_pat2(ptr addrspace(1) %out, i8 zeroext %a, i8 zeroext %b, i8 zeroext %c) {
; GCN-LABEL: s_sad_u32_i8_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dword s2, s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s3, s2, 0xff
; GCN-NEXT: s_bfe_u32 s4, s2, 0x80008
; GCN-NEXT: s_lshr_b32 s2, s2, 16
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s2
; GCN-NEXT: v_sad_u32 v2, s3, v0, v1
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_store_byte v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i8 %a, %b
  %sub0 = sub i8 %a, %b
  %sub1 = sub i8 %b, %a
  %ret0 = select i1 %icmp0, i8 %sub0, i8 %sub1

  %ret = add i8 %ret0, %c

  store i8 %ret, ptr addrspace(1) %out
  ret void
}

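; Negative test: the second select uses %d rather than %b, so this is not a
; sad pattern and no v_sad_u32 may be formed.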
define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
; GCN-LABEL: v_sad_u32_mismatched_operands_pat1:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_max_u32 s6, s0, s1
; GCN-NEXT: s_cmp_le_u32 s0, s1
; GCN-NEXT: s_cselect_b32 s0, s0, s3
; GCN-NEXT: s_sub_i32 s0, s6, s0
; GCN-NEXT: s_add_i32 s0, s0, s2
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %t0 = select i1 %icmp0, i32 %a, i32 %b

  %icmp1 = icmp ule i32 %a, %b
  %t1 = select i1 %icmp1, i32 %a, i32 %d

  %ret0 = sub i32 %t0, %t1
  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}

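; Negative test: one sub uses %d, so no v_sad_u32 may be formed.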
define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
; GCN-LABEL: v_sad_u32_mismatched_operands_pat2:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_sub_i32 s3, s0, s3
; GCN-NEXT: s_sub_i32 s6, s1, s0
; GCN-NEXT: s_cmp_lt_u32 s1, s0
; GCN-NEXT: s_cselect_b32 s0, s3, s6
; GCN-NEXT: s_add_i32 s0, s0, s2
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: v_mov_b32_e32 v1, s5
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %icmp0 = icmp ugt i32 %a, %b
  %sub0 = sub i32 %a, %d
  %sub1 = sub i32 %b, %a
  %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1

  %ret = add i32 %ret0, %c

  store i32 %ret, ptr addrspace(1) %out
  ret void
}