; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx908 < %s | FileCheck --check-prefix=GCN %s

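; Divergent (or (or a, b), c) followed by not should select v_or3_b32 plus
; v_not_b32.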
define amdgpu_kernel void @divergent_or3_b32(ptr addrspace(1) %arg) {
; GCN-LABEL: divergent_or3_b32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_or3_b32 v0, v1, v0, v2
; GCN-NEXT:    v_not_b32_e32 v0, v0
; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i1 = zext i32 %i to i64
  %i2 = getelementptr inbounds <3 x i32>, ptr addrspace(1) %arg, i64 %i1
  %i3 = load <3 x i32>, ptr addrspace(1) %i2, align 16
  %i4 = extractelement <3 x i32> %i3, i64 0
  %i5 = extractelement <3 x i32> %i3, i64 1
  %i6 = extractelement <3 x i32> %i3, i64 2
  %i7 = or i32 %i5, %i4
  %i8 = or i32 %i7, %i6
  %i9 = xor i32 %i8, -1
  store i32 %i9, ptr addrspace(1) %i2, align 16
  ret void
}

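; The 64-bit divergent case splits into two 32-bit halves, each selecting
; v_or3_b32 plus v_not_b32.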
define amdgpu_kernel void @divergent_or3_b64(ptr addrspace(1) %arg) {
; GCN-LABEL: divergent_or3_b64:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_or3_b32 v1, v3, v1, v5
; GCN-NEXT:    v_or3_b32 v0, v2, v0, v4
; GCN-NEXT:    v_not_b32_e32 v1, v1
; GCN-NEXT:    v_not_b32_e32 v0, v0
; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i1 = zext i32 %i to i64
  %i2 = getelementptr inbounds <3 x i64>, ptr addrspace(1) %arg, i64 %i1
  %i3 = load <3 x i64>, ptr addrspace(1) %i2, align 32
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = extractelement <3 x i64> %i3, i64 2
  %i7 = or i64 %i5, %i4
  %i8 = or i64 %i7, %i6
  %i9 = xor i64 %i8, -1
  store i64 %i9, ptr addrspace(1) %i2, align 32
  ret void
}

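; Divergent (and (and a, b), c) followed by not selects two v_and_b32 plus
; v_not_b32 rather than a single 3-input instruction.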
define amdgpu_kernel void @divergent_and3_b32(ptr addrspace(1) %arg) {
; GCN-LABEL: divergent_and3_b32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v0, v1, v0
; GCN-NEXT:    v_and_b32_e32 v0, v0, v2
; GCN-NEXT:    v_not_b32_e32 v0, v0
; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i1 = zext i32 %i to i64
  %i2 = getelementptr inbounds <3 x i32>, ptr addrspace(1) %arg, i64 %i1
  %i3 = load <3 x i32>, ptr addrspace(1) %i2, align 16
  %i4 = extractelement <3 x i32> %i3, i64 0
  %i5 = extractelement <3 x i32> %i3, i64 1
  %i6 = extractelement <3 x i32> %i3, i64 2
  %i7 = and i32 %i5, %i4
  %i8 = and i32 %i7, %i6
  %i9 = xor i32 %i8, -1
  store i32 %i9, ptr addrspace(1) %i2, align 16
  ret void
}

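; 64-bit divergent and3: each half selects two v_and_b32 plus v_not_b32.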
define amdgpu_kernel void @divergent_and3_b64(ptr addrspace(1) %arg) {
; GCN-LABEL: divergent_and3_b64:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    v_and_b32_e32 v1, v3, v1
; GCN-NEXT:    v_and_b32_e32 v0, v2, v0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
; GCN-NEXT:    v_not_b32_e32 v1, v1
; GCN-NEXT:    v_not_b32_e32 v0, v0
; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i1 = zext i32 %i to i64
  %i2 = getelementptr inbounds <3 x i64>, ptr addrspace(1) %arg, i64 %i1
  %i3 = load <3 x i64>, ptr addrspace(1) %i2, align 32
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = extractelement <3 x i64> %i3, i64 2
  %i7 = and i64 %i5, %i4
  %i8 = and i64 %i7, %i6
  %i9 = xor i64 %i8, -1
  store i64 %i9, ptr addrspace(1) %i2, align 32
  ret void
}

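; Divergent (xor (xor a, b), c) followed by not: the trailing not folds into
; v_xnor_b32.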
define amdgpu_kernel void @divergent_xor3_b32(ptr addrspace(1) %arg) {
; GCN-LABEL: divergent_xor3_b32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_xor_b32_e32 v0, v1, v0
; GCN-NEXT:    v_xnor_b32_e32 v0, v0, v2
; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i1 = zext i32 %i to i64
  %i2 = getelementptr inbounds <3 x i32>, ptr addrspace(1) %arg, i64 %i1
  %i3 = load <3 x i32>, ptr addrspace(1) %i2, align 16
  %i4 = extractelement <3 x i32> %i3, i64 0
  %i5 = extractelement <3 x i32> %i3, i64 1
  %i6 = extractelement <3 x i32> %i3, i64 2
  %i7 = xor i32 %i5, %i4
  %i8 = xor i32 %i7, %i6
  %i9 = xor i32 %i8, -1
  store i32 %i9, ptr addrspace(1) %i2, align 16
  ret void
}

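; 64-bit divergent xor3: each half folds the not into v_xnor_b32.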
define amdgpu_kernel void @divergent_xor3_b64(ptr addrspace(1) %arg) {
; GCN-LABEL: divergent_xor3_b64:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    v_xor_b32_e32 v1, v3, v1
; GCN-NEXT:    v_xor_b32_e32 v0, v2, v0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_xnor_b32_e32 v1, v1, v5
; GCN-NEXT:    v_xnor_b32_e32 v0, v0, v4
; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
  %i1 = zext i32 %i to i64
  %i2 = getelementptr inbounds <3 x i64>, ptr addrspace(1) %arg, i64 %i1
  %i3 = load <3 x i64>, ptr addrspace(1) %i2, align 32
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = extractelement <3 x i64> %i3, i64 2
  %i7 = xor i64 %i5, %i4
  %i8 = xor i64 %i7, %i6
  %i9 = xor i64 %i8, -1
  store i64 %i9, ptr addrspace(1) %i2, align 32
  ret void
}

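; With uniform operands the pattern stays on the SALU: s_or_b32 feeds
; s_nor_b32, which absorbs the not.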
define amdgpu_kernel void @uniform_or3_b32(ptr addrspace(1) %arg) {
; GCN-LABEL: uniform_or3_b32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_or_b32 s0, s1, s0
; GCN-NEXT:    s_nor_b32 s0, s0, s2
; GCN-NEXT:    v_mov_b32_e32 v1, s0
; GCN-NEXT:    global_store_dword v0, v1, s[6:7]
; GCN-NEXT:    s_endpgm
bb:
  %i3 = load <3 x i32>, ptr addrspace(1) %arg, align 16
  %i4 = extractelement <3 x i32> %i3, i64 0
  %i5 = extractelement <3 x i32> %i3, i64 1
  %i6 = extractelement <3 x i32> %i3, i64 2
  %i7 = or i32 %i5, %i4
  %i8 = or i32 %i7, %i6
  %i9 = xor i32 %i8, -1
  store i32 %i9, ptr addrspace(1) %arg, align 16
  ret void
}

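; Uniform 64-bit case uses the full-width s_or_b64 plus s_nor_b64.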
define amdgpu_kernel void @uniform_or3_b64(ptr addrspace(1) %arg) {
; GCN-LABEL: uniform_or3_b64:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[6:7], 0x10
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_nor_b64 s[0:1], s[0:1], s[4:5]
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
; GCN-NEXT:    s_endpgm
bb:
  %i3 = load <3 x i64>, ptr addrspace(1) %arg, align 32
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = extractelement <3 x i64> %i3, i64 2
  %i7 = or i64 %i5, %i4
  %i8 = or i64 %i7, %i6
  %i9 = xor i64 %i8, -1
  store i64 %i9, ptr addrspace(1) %arg, align 32
  ret void
}

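; Uniform and3: s_and_b32 feeds s_nand_b32, which absorbs the not.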
define amdgpu_kernel void @uniform_and3_b32(ptr addrspace(1) %arg) {
; GCN-LABEL: uniform_and3_b32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_and_b32 s0, s1, s0
; GCN-NEXT:    s_nand_b32 s0, s0, s2
; GCN-NEXT:    v_mov_b32_e32 v1, s0
; GCN-NEXT:    global_store_dword v0, v1, s[6:7]
; GCN-NEXT:    s_endpgm
bb:
  %i3 = load <3 x i32>, ptr addrspace(1) %arg, align 16
  %i4 = extractelement <3 x i32> %i3, i64 0
  %i5 = extractelement <3 x i32> %i3, i64 1
  %i6 = extractelement <3 x i32> %i3, i64 2
  %i7 = and i32 %i5, %i4
  %i8 = and i32 %i7, %i6
  %i9 = xor i32 %i8, -1
  store i32 %i9, ptr addrspace(1) %arg, align 16
  ret void
}

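; Uniform 64-bit and3: s_and_b64 plus s_nand_b64.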
define amdgpu_kernel void @uniform_and3_b64(ptr addrspace(1) %arg) {
; GCN-LABEL: uniform_and3_b64:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[6:7], 0x10
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_nand_b64 s[0:1], s[0:1], s[4:5]
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
; GCN-NEXT:    s_endpgm
bb:
  %i3 = load <3 x i64>, ptr addrspace(1) %arg, align 32
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = extractelement <3 x i64> %i3, i64 2
  %i7 = and i64 %i5, %i4
  %i8 = and i64 %i7, %i6
  %i9 = xor i64 %i8, -1
  store i64 %i9, ptr addrspace(1) %arg, align 32
  ret void
}

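; Uniform xor3: s_xor_b32 feeds s_xnor_b32, which absorbs the not.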
define amdgpu_kernel void @uniform_xor3_b32(ptr addrspace(1) %arg) {
; GCN-LABEL: uniform_xor3_b32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_xor_b32 s0, s1, s0
; GCN-NEXT:    s_xnor_b32 s0, s0, s2
; GCN-NEXT:    v_mov_b32_e32 v1, s0
; GCN-NEXT:    global_store_dword v0, v1, s[6:7]
; GCN-NEXT:    s_endpgm
bb:
  %i3 = load <3 x i32>, ptr addrspace(1) %arg, align 16
  %i4 = extractelement <3 x i32> %i3, i64 0
  %i5 = extractelement <3 x i32> %i3, i64 1
  %i6 = extractelement <3 x i32> %i3, i64 2
  %i7 = xor i32 %i5, %i4
  %i8 = xor i32 %i7, %i6
  %i9 = xor i32 %i8, -1
  store i32 %i9, ptr addrspace(1) %arg, align 16
  ret void
}

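; Uniform 64-bit xor3: s_xor_b64 plus s_xnor_b64.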
define amdgpu_kernel void @uniform_xor3_b64(ptr addrspace(1) %arg) {
; GCN-LABEL: uniform_xor3_b64:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[6:7], 0x10
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_xnor_b64 s[0:1], s[0:1], s[4:5]
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7]
; GCN-NEXT:    s_endpgm
bb:
  %i3 = load <3 x i64>, ptr addrspace(1) %arg, align 32
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = extractelement <3 x i64> %i3, i64 2
  %i7 = xor i64 %i5, %i4
  %i8 = xor i64 %i7, %i6
  %i9 = xor i64 %i8, -1
  store i64 %i9, ptr addrspace(1) %arg, align 32
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()