; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

; Prohibit pointer cast for amx.
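; InstCombine would normally fold a load followed by a bitcast into a load of
; the destination type, but x86_amx values only live in AMX tile registers and
; cannot be loaded or stored with plain load/store instructions, so both the
; <256 x i32> load and the bitcast must survive.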
define dso_local void @test_amx_load_store(ptr %src, ptr %dst) {
; CHECK-LABEL: @test_amx_load_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC:%.*]] = load <256 x i32>, ptr [[SRC:%.*]], align 64
; CHECK-NEXT:    [[BC:%.*]] = bitcast <256 x i32> [[VEC]] to x86_amx
; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr [[DST:%.*]], i64 64, x86_amx [[BC]])
; CHECK-NEXT:    ret void
;
entry:
  %vec = load <256 x i32>, ptr %src, align 64
  %bc = bitcast <256 x i32> %vec to x86_amx
  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %dst, i64 64, x86_amx %bc)
  ret void
}

; Prohibit pointer cast for amx.
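; Same in the store direction: the bitcast from x86_amx and the plain
; <256 x i32> store must not be rewritten into a direct x86_amx store.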
define dso_local void @test_amx_load_store2(ptr %dst, ptr %src) {
; CHECK-LABEL: @test_amx_load_store2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AMX:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr [[SRC:%.*]], i64 64)
; CHECK-NEXT:    [[BC:%.*]] = bitcast x86_amx [[AMX]] to <256 x i32>
; CHECK-NEXT:    store <256 x i32> [[BC]], ptr [[DST:%.*]], align 1024
; CHECK-NEXT:    ret void
;
entry:
  %amx = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr %src, i64 64)
  %bc = bitcast x86_amx %amx to <256 x i32>
  store <256 x i32> %bc, ptr %dst
  ret void
}

declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
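
; For reference, the fold these tests guard against would look roughly like
; the sketch below (illustrative only, not part of the test): collapsing the
; vector load/bitcast pair into a direct load of x86_amx, which is not a
; supported IR operation since x86_amx values only exist in tile registers.
;
;   %amx = load x86_amx, ptr %src, align 64
;   call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %dst, i64 64, x86_amx %amx)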