; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
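; Two LDS arrays that already carry alias.scope/noalias and TBAA metadata.
; The module LDS lowering packs them into a single per-kernel struct; the
; checks verify that the rewritten accesses still carry noalias scopes that
; keep the stores to @a and @b from clobbering each other's loads.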
@a = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
@b = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
define amdgpu_kernel void @no_clobber_ds_load_stores_x2_preexisting_aa(ptr addrspace(1) %arg, i32 %i) {
; CHECK-LABEL: define amdgpu_kernel void @no_clobber_ds_load_stores_x2_preexisting_aa(
; CHECK-SAME: ptr addrspace(1) [[ARG:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: bb:
; CHECK-NEXT: store i32 1, ptr addrspace(3) @llvm.amdgcn.kernel.no_clobber_ds_load_stores_x2_preexisting_aa.lds, align 16, !tbaa [[TBAA2:![0-9]+]], !noalias [[META7:![0-9]+]]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [64 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.no_clobber_ds_load_stores_x2_preexisting_aa.lds, i32 0, i32 [[I]]
; CHECK-NEXT: [[VAL_A:%.*]] = load i32, ptr addrspace(3) [[GEP_A]], align 4, !tbaa [[TBAA2]], !noalias [[META7]]
; CHECK-NEXT: store i32 2, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_NO_CLOBBER_DS_LOAD_STORES_X2_PREEXISTING_AA_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.no_clobber_ds_load_stores_x2_preexisting_aa.lds, i32 0, i32 1), align 16, !tbaa [[TBAA2]], !noalias [[META12:![0-9]+]]
; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [64 x i32], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_NO_CLOBBER_DS_LOAD_STORES_X2_PREEXISTING_AA_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.no_clobber_ds_load_stores_x2_preexisting_aa.lds, i32 0, i32 1), i32 0, i32 [[I]]
; CHECK-NEXT: [[VAL_B:%.*]] = load i32, ptr addrspace(3) [[GEP_B]], align 4, !tbaa [[TBAA2]], !noalias [[META12]]
; CHECK-NEXT: [[VAL:%.*]] = add i32 [[VAL_A]], [[VAL_B]]
; CHECK-NEXT: store i32 [[VAL]], ptr addrspace(1) [[ARG]], align 4
; CHECK-NEXT: ret void
;
bb:
store i32 1, ptr addrspace(3) @a, align 4, !alias.scope !0, !noalias !3, !tbaa !5
%gep.a = getelementptr inbounds [64 x i32], ptr addrspace(3) @a, i32 0, i32 %i
%val.a = load i32, ptr addrspace(3) %gep.a, align 4, !alias.scope !0, !noalias !3, !tbaa !5
store i32 2, ptr addrspace(3) @b, align 4, !alias.scope !3, !noalias !0, !tbaa !5
%gep.b = getelementptr inbounds [64 x i32], ptr addrspace(3) @b, i32 0, i32 %i
%val.b = load i32, ptr addrspace(3) %gep.b, align 4, !alias.scope !3, !noalias !0, !tbaa !5
%val = add i32 %val.a, %val.b
store i32 %val, ptr addrspace(1) %arg, align 4
ret void
}
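; Pre-existing alias analysis metadata: !0 and !3 are single-scope lists in
; the shared domain !2 (each access uses one as !alias.scope and the other as
; !noalias), and !5 is the TBAA access tag built from !6-!9.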
!0 = !{!1}
!1 = distinct !{!1, !2}
!2 = distinct !{!2}
!3 = !{!4}
!4 = distinct !{!4, !2}
!5 = !{!6, !7, i64 0}
!6 = !{!"no_clobber_ds_load_stores_x2_preexisting_aa", !7, i64 0}
!7 = !{!"int", !8, i64 0}
!8 = !{!"omnipotent char", !9, i64 0}
!9 = !{!"Simple C++ TBAA"}
; CHECK: !0 = !{i32 0, i32 1}
; CHECK: !1 = !{i32 1, !"amdgpu.lowered_lds", i32 1}
; CHECK: !2 = !{!3, !4, i64 0}
; CHECK: !3 = !{!"no_clobber_ds_load_stores_x2_preexisting_aa", !4, i64 0}
; CHECK: !4 = !{!"int", !5, i64 0}
; CHECK: !5 = !{!"omnipotent char", !6, i64 0}
; CHECK: !6 = !{!"Simple C++ TBAA"}
; CHECK: !7 = !{!8, !10}
; CHECK: !8 = distinct !{!8, !9}
; CHECK: !9 = distinct !{!9}
; CHECK: !10 = distinct !{!10, !11}
; CHECK: !11 = distinct !{!11}
; CHECK: !12 = !{!13, !14}
; CHECK: !13 = distinct !{!13, !9}
; CHECK: !14 = distinct !{!14, !11}