|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s --check-prefix=GFX12 | 
|  |  | 
; First i1 immarg (src0 sign modifier) = 1; the autogenerated checks expect
; this to lower to neg_lo:[1,0,0] on v_wmma_i32_16x16x16_iu8.
define amdgpu_ps void @test_wmma_i32_16x16x16_iu8_zext_src0(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x16_iu8_zext_src0:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x16_iu8 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[1,0,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[4:7], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32(i1 1, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Second i1 immarg (src1 sign modifier) = 1; expected to lower to
; neg_lo:[0,1,0] on v_wmma_i32_16x16x16_iu8.
define amdgpu_ps void @test_wmma_i32_16x16x16_iu8_zext_src1(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x16_iu8_zext_src1:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x16_iu8 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[0,1,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[4:7], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32(i1 0, <2 x i32> %A, i1 1, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Final i1 immarg (clamp) = 1; expected to lower to the 'clamp' modifier on
; v_wmma_i32_16x16x16_iu8.
define amdgpu_ps void @test_wmma_i32_16x16x16_iu8_clamp(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x16_iu8_clamp:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x16_iu8 v[4:11], v[0:1], v[2:3], v[4:11] clamp
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[4:7], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32(i1 0, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
|  |  | 
|  |  | 
; iu4 variant (i32 operands): src0 sign modifier = 1; expected to lower to
; neg_lo:[1,0,0] on v_wmma_i32_16x16x16_iu4.
define amdgpu_ps void @test_wmma_i32_16x16x16_iu4_zext_src0(i32 %A, i32 %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x16_iu4_zext_src0:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x16_iu4 v[2:9], v0, v1, v[2:9] neg_lo:[1,0,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[10:11], v[6:9], off offset:16
; GFX12-NEXT:    global_store_b128 v[10:11], v[2:5], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32(i1 1, i32 %A, i1 0, i32 %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; iu4 variant: src1 sign modifier = 1; expected to lower to neg_lo:[0,1,0]
; on v_wmma_i32_16x16x16_iu4.
define amdgpu_ps void @test_wmma_i32_16x16x16_iu4_zext_src1(i32 %A, i32 %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x16_iu4_zext_src1:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x16_iu4 v[2:9], v0, v1, v[2:9] neg_lo:[0,1,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[10:11], v[6:9], off offset:16
; GFX12-NEXT:    global_store_b128 v[10:11], v[2:5], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32(i1 0, i32 %A, i1 1, i32 %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; iu4 variant: clamp immarg = 1; expected to lower to the 'clamp' modifier
; on v_wmma_i32_16x16x16_iu4.
define amdgpu_ps void @test_wmma_i32_16x16x16_iu4_clamp(i32 %A, i32 %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x16_iu4_clamp:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x16_iu4 v[2:9], v0, v1, v[2:9] clamp
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[10:11], v[6:9], off offset:16
; GFX12-NEXT:    global_store_b128 v[10:11], v[2:5], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32(i1 0, i32 %A, i1 0, i32 %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
|  |  | 
|  |  | 
; 16x16x32 iu4 (double-K, <2 x i32> operands): src0 sign modifier = 1;
; expected to lower to neg_lo:[1,0,0] on v_wmma_i32_16x16x32_iu4.
define amdgpu_ps void @test_wmma_i32_16x16x32_iu4_zext_src0(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x32_iu4_zext_src0:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x32_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[1,0,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[4:7], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32(i1 1, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; 16x16x32 iu4: src1 sign modifier = 1; expected to lower to neg_lo:[0,1,0]
; on v_wmma_i32_16x16x32_iu4.
define amdgpu_ps void @test_wmma_i32_16x16x32_iu4_zext_src1(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x32_iu4_zext_src1:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x32_iu4 v[4:11], v[0:1], v[2:3], v[4:11] neg_lo:[0,1,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[4:7], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32(i1 0, <2 x i32> %A, i1 1, <2 x i32> %B, <8 x i32> %C, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; 16x16x32 iu4: clamp immarg = 1; expected to lower to the 'clamp' modifier
; on v_wmma_i32_16x16x32_iu4.
define amdgpu_ps void @test_wmma_i32_16x16x32_iu4_clamp(<2 x i32> %A, <2 x i32> %B, <8 x i32> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_i32_16x16x32_iu4_clamp:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_wmma_i32_16x16x32_iu4 v[4:11], v[0:1], v[2:3], v[4:11] clamp
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[8:11], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[4:7], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32(i1 0, <2 x i32> %A, i1 0, <2 x i32> %B, <8 x i32> %C, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
|  |  | 
|  |  | 
|  |  | 
|  |  | 
; Sparse variant (swmmac, takes an i16 %Index): src0 sign modifier = 1;
; expected to lower to neg_lo:[1,0,0] on v_swmmac_i32_16x16x32_iu8.
define amdgpu_ps void @test_swmmac_i32_16x16x32_iu8_zext_src0(<2 x i32> %A, <4 x i32> %B, <8 x i32> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x32_iu8_zext_src0:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x32_iu8 v[6:13], v[0:1], v[2:5], v14 neg_lo:[1,0,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[15:16], v[10:13], off offset:16
; GFX12-NEXT:    global_store_b128 v[15:16], v[6:9], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.i16(i1 1, <2 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i16 %Index, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Sparse iu8: src1 sign modifier = 1; expected to lower to neg_lo:[0,1,0]
; on v_swmmac_i32_16x16x32_iu8.
define amdgpu_ps void @test_swmmac_i32_16x16x32_iu8_zext_src1(<2 x i32> %A, <4 x i32> %B, <8 x i32> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x32_iu8_zext_src1:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x32_iu8 v[6:13], v[0:1], v[2:5], v14 neg_lo:[0,1,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[15:16], v[10:13], off offset:16
; GFX12-NEXT:    global_store_b128 v[15:16], v[6:9], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.i16(i1 0, <2 x i32> %A, i1 1, <4 x i32> %B, <8 x i32> %C, i16 %Index, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Sparse iu8: clamp immarg = 1; expected to lower to the 'clamp' modifier
; on v_swmmac_i32_16x16x32_iu8.
define amdgpu_ps void @test_swmmac_i32_16x16x32_iu8_clamp(<2 x i32> %A, <4 x i32> %B, <8 x i32> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x32_iu8_clamp:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x32_iu8 v[6:13], v[0:1], v[2:5], v14 clamp
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[15:16], v[10:13], off offset:16
; GFX12-NEXT:    global_store_b128 v[15:16], v[6:9], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.i16(i1 0, <2 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i16 %Index, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
|  |  | 
|  |  | 
; Sparse iu4 (i32 A, <2 x i32> B, i16 index): src0 sign modifier = 1;
; expected to lower to neg_lo:[1,0,0] on v_swmmac_i32_16x16x32_iu4.
define amdgpu_ps void @test_swmmac_i32_16x16x32_iu4_zext_src0(i32 %A, <2 x i32> %B, <8 x i32> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x32_iu4_zext_src0:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x32_iu4 v[3:10], v0, v[1:2], v11 neg_lo:[1,0,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[7:10], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[3:6], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.i16(i1 1, i32 %A, i1 0, <2 x i32> %B, <8 x i32> %C, i16 %Index, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Sparse iu4: src1 sign modifier = 1; expected to lower to neg_lo:[0,1,0]
; on v_swmmac_i32_16x16x32_iu4.
define amdgpu_ps void @test_swmmac_i32_16x16x32_iu4_zext_src1(i32 %A, <2 x i32> %B, <8 x i32> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x32_iu4_zext_src1:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x32_iu4 v[3:10], v0, v[1:2], v11 neg_lo:[0,1,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[7:10], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[3:6], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.i16(i1 0, i32 %A, i1 1, <2 x i32> %B, <8 x i32> %C, i16 %Index, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Sparse iu4: clamp immarg = 1; expected to lower to the 'clamp' modifier
; on v_swmmac_i32_16x16x32_iu4.
define amdgpu_ps void @test_swmmac_i32_16x16x32_iu4_clamp(i32 %A, <2 x i32> %B, <8 x i32> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x32_iu4_clamp:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x32_iu4 v[3:10], v0, v[1:2], v11 clamp
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[12:13], v[7:10], off offset:16
; GFX12-NEXT:    global_store_b128 v[12:13], v[3:6], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.i16(i1 0, i32 %A, i1 0, <2 x i32> %B, <8 x i32> %C, i16 %Index, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
|  |  | 
|  |  | 
; Sparse iu4, K=64 (wider operands, i32 index): src0 sign modifier = 1;
; expected to lower to neg_lo:[1,0,0] on v_swmmac_i32_16x16x64_iu4.
define amdgpu_ps void @test_swmmac_i32_16x16x64_iu4_zext_src0(<2 x i32> %A, <4 x i32> %B, <8 x i32> %C, i32 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x64_iu4_zext_src0:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x64_iu4 v[6:13], v[0:1], v[2:5], v14 neg_lo:[1,0,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[15:16], v[10:13], off offset:16
; GFX12-NEXT:    global_store_b128 v[15:16], v[6:9], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.i32(i1 1, <2 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i32 %Index, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Sparse iu4, K=64: src1 sign modifier = 1; expected to lower to
; neg_lo:[0,1,0] on v_swmmac_i32_16x16x64_iu4.
define amdgpu_ps void @test_swmmac_i32_16x16x64_iu4_zext_src1(<2 x i32> %A, <4 x i32> %B, <8 x i32> %C, i32 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x64_iu4_zext_src1:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x64_iu4 v[6:13], v[0:1], v[2:5], v14 neg_lo:[0,1,0]
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[15:16], v[10:13], off offset:16
; GFX12-NEXT:    global_store_b128 v[15:16], v[6:9], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.i32(i1 0, <2 x i32> %A, i1 1, <4 x i32> %B, <8 x i32> %C, i32 %Index, i1 0)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Sparse iu4, K=64: clamp immarg = 1; expected to lower to the 'clamp'
; modifier on v_swmmac_i32_16x16x64_iu4.
define amdgpu_ps void @test_swmmac_i32_16x16x64_iu4_clamp(<2 x i32> %A, <4 x i32> %B, <8 x i32> %C, i32 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_i32_16x16x64_iu4_clamp:
; GFX12:       ; %bb.0: ; %bb
; GFX12-NEXT:    v_swmmac_i32_16x16x64_iu4 v[6:13], v[0:1], v[2:5], v14 clamp
; GFX12-NEXT:    s_clause 0x1
; GFX12-NEXT:    global_store_b128 v[15:16], v[10:13], off offset:16
; GFX12-NEXT:    global_store_b128 v[15:16], v[6:9], off
; GFX12-NEXT:    s_endpgm
bb:
  %res = call <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.i32(i1 0, <2 x i32> %A, i1 0, <4 x i32> %B, <8 x i32> %C, i32 %Index, i1 1)
  store <8 x i32> %res, ptr addrspace(1) %out
  ret void
}
|  |  | 
; Intrinsic declarations used by the tests above. The three i1 immargs are,
; in order: src0 modifier, src1 modifier, and clamp.
declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg)
declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32(i1 immarg, i32, i1 immarg, i32, <8 x i32>, i1 immarg)
declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg)
declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg)
declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg)
declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.i32(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i32 %Index, i1 immarg)