; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 5
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-perf-hint < %s | FileCheck -check-prefix=CHECK %s
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-perf-hint < %s | FileCheck -check-prefix=CHECK %s
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s

; Streaming copy kernel (two <4 x i32> load/store pairs per thread): the
; perf-hint analysis marks it both memory bound and wave limited.
; GCN-LABEL: {{^}}test_membound:
; GCN: MemoryBound: 1
; GCN: WaveLimiterHint : 1
define amdgpu_kernel void @test_membound(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; CHECK-LABEL: define amdgpu_kernel void @test_membound(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP3]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP2]]
; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr addrspace(1) [[TMP5]], align 16
; CHECK-NEXT:    [[TMP6:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP7]], align 16
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP6]]
; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr addrspace(1) [[TMP9]], align 16
; CHECK-NEXT:    ret void
;
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp3 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, ptr addrspace(1) %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp2
  store <4 x i32> %tmp4, ptr addrspace(1) %tmp5, align 16
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, ptr addrspace(1) %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp6
  store <4 x i32> %tmp8, ptr addrspace(1) %tmp9, align 16
  ret void
}

; Five dependent <2 x double> loads dominate the kernel; only the MemoryBound
; hint is checked here (no WaveLimiterHint expectation for this test).
; GCN-LABEL: {{^}}test_membound_1:
; GCN: MemoryBound: 1
define amdgpu_kernel void @test_membound_1(ptr addrspace(1) nocapture readonly %ptr.0,
; CHECK-LABEL: define amdgpu_kernel void @test_membound_1(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[PTR_0:%.*]], ptr addrspace(1) captures(none) [[PTR_1:%.*]], <2 x double> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], <4 x double> [[ARG_2:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:  [[BB_ENTRY:.*:]]
; CHECK-NEXT:    [[ID_32:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[ID_0:%.*]] = zext i32 [[ID_32]] to i64
; CHECK-NEXT:    [[GEP_0:%.*]] = getelementptr inbounds <2 x double>, ptr addrspace(1) [[PTR_0]], i64 [[ID_0]]
; CHECK-NEXT:    [[LD_0:%.*]] = load <2 x double>, ptr addrspace(1) [[GEP_0]], align 16
; CHECK-NEXT:    [[ADD_0:%.*]] = fadd <2 x double> [[ARG_0]], [[LD_0]]
; CHECK-NEXT:    [[ID_1:%.*]] = add nuw nsw i64 [[ID_0]], 1
; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds <2 x double>, ptr addrspace(1) [[PTR_0]], i64 [[ID_1]]
; CHECK-NEXT:    [[LD_1:%.*]] = load <2 x double>, ptr addrspace(1) [[GEP_1]], align 16
; CHECK-NEXT:    [[ADD_1:%.*]] = fadd <2 x double> [[ADD_0]], [[LD_1]]
; CHECK-NEXT:    [[ID_2:%.*]] = add nuw nsw i64 [[ID_0]], 2
; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds <2 x double>, ptr addrspace(1) [[PTR_0]], i64 [[ID_2]]
; CHECK-NEXT:    [[LD_2:%.*]] = load <2 x double>, ptr addrspace(1) [[GEP_2]], align 16
; CHECK-NEXT:    [[ADD_2:%.*]] = fadd <2 x double> [[ADD_1]], [[LD_2]]
; CHECK-NEXT:    [[ID_3:%.*]] = add nuw nsw i64 [[ID_0]], 3
; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds <2 x double>, ptr addrspace(1) [[PTR_0]], i64 [[ID_3]]
; CHECK-NEXT:    [[LD_3:%.*]] = load <2 x double>, ptr addrspace(1) [[GEP_3]], align 16
; CHECK-NEXT:    [[ADD_3:%.*]] = fadd <2 x double> [[ADD_2]], [[LD_3]]
; CHECK-NEXT:    [[ID_4:%.*]] = add nuw nsw i64 [[ID_0]], 4
; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds <2 x double>, ptr addrspace(1) [[PTR_0]], i64 [[ID_4]]
; CHECK-NEXT:    [[LD_4:%.*]] = load <2 x double>, ptr addrspace(1) [[GEP_4]], align 16
; CHECK-NEXT:    [[ADD_4:%.*]] = fadd <2 x double> [[ADD_3]], [[LD_4]]
; CHECK-NEXT:    store <2 x double> [[ADD_4]], ptr addrspace(1) [[PTR_1]], align 16
; CHECK-NEXT:    [[COND:%.*]] = icmp eq i32 [[ARG_1]], 0
; CHECK-NEXT:    br i1 [[COND]], label %[[BB_TRUE:.*]], label %[[BB_RET:.*]]
; CHECK:       [[BB_TRUE]]:
; CHECK-NEXT:    [[I0_ARG_0:%.*]] = extractelement <2 x double> [[ARG_0]], i32 0
; CHECK-NEXT:    [[I1_ARG_0:%.*]] = extractelement <2 x double> [[ARG_0]], i32 1
; CHECK-NEXT:    [[ADD_1_0:%.*]] = fadd double [[I0_ARG_0]], [[I1_ARG_0]]
; CHECK-NEXT:    [[I0_ARG_2:%.*]] = extractelement <4 x double> [[ARG_2]], i32 0
; CHECK-NEXT:    [[I1_ARG_2:%.*]] = extractelement <4 x double> [[ARG_2]], i32 1
; CHECK-NEXT:    [[ADD_1_1:%.*]] = fadd double [[I0_ARG_2]], [[I1_ARG_2]]
; CHECK-NEXT:    [[ADD_1_2:%.*]] = fadd double [[ADD_1_0]], [[ADD_1_1]]
; CHECK-NEXT:    [[I2_ARG_2:%.*]] = extractelement <4 x double> [[ARG_2]], i32 2
; CHECK-NEXT:    [[I3_ARG_2:%.*]] = extractelement <4 x double> [[ARG_2]], i32 3
; CHECK-NEXT:    [[ADD_1_3:%.*]] = fadd double [[I2_ARG_2]], [[I3_ARG_2]]
; CHECK-NEXT:    [[ADD_1_4:%.*]] = fadd double [[ADD_1_2]], [[ADD_1_3]]
; CHECK-NEXT:    [[I0_ADD_0:%.*]] = extractelement <2 x double> [[ADD_0]], i32 0
; CHECK-NEXT:    [[I1_ADD_0:%.*]] = extractelement <2 x double> [[ADD_0]], i32 1
; CHECK-NEXT:    [[ADD_1_5:%.*]] = fadd double [[I0_ADD_0]], [[I1_ADD_0]]
; CHECK-NEXT:    [[ADD_1_6:%.*]] = fadd double [[ADD_1_4]], [[ADD_1_5]]
; CHECK-NEXT:    [[I0_ADD_1:%.*]] = extractelement <2 x double> [[ADD_1]], i32 0
; CHECK-NEXT:    [[I1_ADD_1:%.*]] = extractelement <2 x double> [[ADD_1]], i32 1
; CHECK-NEXT:    [[ADD_1_7:%.*]] = fadd double [[I0_ADD_1]], [[I1_ADD_1]]
; CHECK-NEXT:    [[ADD_1_8:%.*]] = fadd double [[ADD_1_6]], [[ADD_1_7]]
; CHECK-NEXT:    [[I0_ADD_2:%.*]] = extractelement <2 x double> [[ADD_2]], i32 0
; CHECK-NEXT:    [[I1_ADD_2:%.*]] = extractelement <2 x double> [[ADD_2]], i32 1
; CHECK-NEXT:    [[ADD_1_9:%.*]] = fadd double [[I0_ADD_2]], [[I1_ADD_2]]
; CHECK-NEXT:    [[ADD_1_10:%.*]] = fadd double [[ADD_1_8]], [[ADD_1_9]]
; CHECK-NEXT:    store double [[ADD_1_8]], ptr addrspace(1) [[PTR_1]], align 8
; CHECK-NEXT:    br label %[[BB_RET]]
; CHECK:       [[BB_RET]]:
; CHECK-NEXT:    ret void
;
                                           ptr addrspace(1) nocapture %ptr.1,
                                           <2 x double> %arg.0, i32 %arg.1, <4 x double> %arg.2) {
bb.entry:
  %id.32 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.0 = zext i32 %id.32 to i64
  %gep.0 = getelementptr inbounds <2 x double>, ptr addrspace(1) %ptr.0, i64 %id.0
  %ld.0 = load <2 x double>, ptr addrspace(1) %gep.0, align 16
  %add.0 = fadd <2 x double> %arg.0, %ld.0

  %id.1 = add nuw nsw i64 %id.0, 1
  %gep.1 = getelementptr inbounds <2 x double>, ptr addrspace(1) %ptr.0, i64 %id.1
  %ld.1 = load <2 x double>, ptr addrspace(1) %gep.1, align 16
  %add.1 = fadd <2 x double> %add.0, %ld.1

  %id.2 = add nuw nsw i64 %id.0, 2
  %gep.2 = getelementptr inbounds <2 x double>, ptr addrspace(1) %ptr.0, i64 %id.2
  %ld.2 = load <2 x double>, ptr addrspace(1) %gep.2, align 16
  %add.2 = fadd <2 x double> %add.1, %ld.2

  %id.3 = add nuw nsw i64 %id.0, 3
  %gep.3 = getelementptr inbounds <2 x double>, ptr addrspace(1) %ptr.0, i64 %id.3
  %ld.3 = load <2 x double>, ptr addrspace(1) %gep.3, align 16
  %add.3 = fadd <2 x double> %add.2, %ld.3

  %id.4 = add nuw nsw i64 %id.0, 4
  %gep.4 = getelementptr inbounds <2 x double>, ptr addrspace(1) %ptr.0, i64 %id.4
  %ld.4 = load <2 x double>, ptr addrspace(1) %gep.4, align 16
  %add.4 = fadd <2 x double> %add.3, %ld.4

  store <2 x double> %add.4, ptr addrspace(1) %ptr.1, align 16
  %cond = icmp eq i32 %arg.1, 0
  br i1 %cond, label %bb.true, label %bb.ret

bb.true:
  %i0.arg.0 = extractelement <2 x double> %arg.0, i32 0
  %i1.arg.0 = extractelement <2 x double> %arg.0, i32 1
  %add.1.0 = fadd double %i0.arg.0, %i1.arg.0
  %i0.arg.2 = extractelement <4 x double> %arg.2, i32 0
  %i1.arg.2 = extractelement <4 x double> %arg.2, i32 1
  %add.1.1 = fadd double %i0.arg.2, %i1.arg.2
  %add.1.2 = fadd double %add.1.0, %add.1.1
  %i2.arg.2 = extractelement <4 x double> %arg.2, i32 2
  %i3.arg.2 = extractelement <4 x double> %arg.2, i32 3
  %add.1.3 = fadd double %i2.arg.2, %i3.arg.2
  %add.1.4 = fadd double %add.1.2, %add.1.3
  %i0.add.0 = extractelement <2 x double> %add.0, i32 0
  %i1.add.0 = extractelement <2 x double> %add.0, i32 1
  %add.1.5 = fadd double %i0.add.0, %i1.add.0
  %add.1.6 = fadd double %add.1.4, %add.1.5
  %i0.add.1 = extractelement <2 x double> %add.1, i32 0
  %i1.add.1 = extractelement <2 x double> %add.1, i32 1
  %add.1.7 = fadd double %i0.add.1, %i1.add.1
  %add.1.8 = fadd double %add.1.6, %add.1.7
  %i0.add.2 = extractelement <2 x double> %add.2, i32 0
  %i1.add.2 = extractelement <2 x double> %add.2, i32 1
  %add.1.9 = fadd double %i0.add.2, %i1.add.2
  ; NOTE(review): %add.1.10 is intentionally unused (only %add.1.8 is stored);
  ; it keeps extra ALU work live in this block for the heuristic.
  %add.1.10 = fadd double %add.1.8, %add.1.9

  store double %add.1.8, ptr addrspace(1) %ptr.1, align 8
  br label %bb.ret

bb.ret:
  ret void
}

; Accesses at large strides (4096/8192/12288 x i32): not classified memory
; bound, but still wave limited.
; GCN-LABEL: {{^}}test_large_stride:
; GCN: MemoryBound: 0
; GCN: WaveLimiterHint : 1
define amdgpu_kernel void @test_large_stride(ptr addrspace(1) nocapture %arg) {
; CHECK-LABEL: define amdgpu_kernel void @test_large_stride(
; CHECK-SAME: ptr addrspace(1) captures(none) [[ARG:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 4096
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(1) [[TMP]], align 4
; CHECK-NEXT:    [[MUL1:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1
; CHECK-NEXT:    store i32 [[MUL1]], ptr addrspace(1) [[TMP2]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 8192
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(1) [[TMP3]], align 4
; CHECK-NEXT:    [[MUL4:%.*]] = mul i32 [[TMP4]], [[TMP4]]
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT:    store i32 [[MUL4]], ptr addrspace(1) [[TMP5]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 12288
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) [[TMP6]], align 4
; CHECK-NEXT:    [[MUL7:%.*]] = mul i32 [[TMP7]], [[TMP7]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 3
; CHECK-NEXT:    store i32 [[MUL7]], ptr addrspace(1) [[TMP8]], align 4
; CHECK-NEXT:    ret void
;
bb:
  %tmp = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 4096
  %tmp1 = load i32, ptr addrspace(1) %tmp, align 4
  %mul1 = mul i32 %tmp1, %tmp1
  %tmp2 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
  store i32 %mul1, ptr addrspace(1) %tmp2, align 4
  %tmp3 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 8192
  %tmp4 = load i32, ptr addrspace(1) %tmp3, align 4
  %mul4 = mul i32 %tmp4, %tmp4
  %tmp5 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
  store i32 %mul4, ptr addrspace(1) %tmp5, align 4
  %tmp6 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 12288
  %tmp7 = load i32, ptr addrspace(1) %tmp6, align 4
  %mul7 = mul i32 %tmp7, %tmp7
  %tmp8 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 3
  store i32 %mul7, ptr addrspace(1) %tmp8, align 4
  ret void
}

; Loads whose addresses are themselves loaded (indirect access): memory bound
; and wave limited.
; GCN-LABEL: {{^}}test_indirect:
; GCN: MemoryBound: 1
; GCN: WaveLimiterHint : 1
define amdgpu_kernel void @test_indirect(ptr addrspace(1) nocapture %arg) {
; CHECK-LABEL: define amdgpu_kernel void @test_indirect(
; CHECK-SAME: ptr addrspace(1) captures(none) [[ARG:%.*]]) #[[ATTR2]] {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 2
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 3
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr addrspace(1) [[ARG]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <4 x i32> [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP5]] to i64
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP7]], align 4
; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) [[ARG]], align 4
; CHECK-NEXT:    [[TMP9:%.*]] = extractelement <4 x i32> [[TMP4]], i32 1
; CHECK-NEXT:    [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) [[TMP11]], align 4
; CHECK-NEXT:    store i32 [[TMP12]], ptr addrspace(1) [[TMP]], align 4
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x i32> [[TMP4]], i32 2
; CHECK-NEXT:    [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr addrspace(1) [[TMP15]], align 4
; CHECK-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[TMP1]], align 4
; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <4 x i32> [[TMP4]], i32 3
; CHECK-NEXT:    [[TMP18:%.*]] = sext i32 [[TMP17]] to i64
; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG]], i64 [[TMP18]]
; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) [[TMP19]], align 4
; CHECK-NEXT:    store i32 [[TMP20]], ptr addrspace(1) [[TMP2]], align 4
; CHECK-NEXT:    ret void
;
bb:
  %tmp = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 1
  %tmp1 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 2
  %tmp2 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 3
  %tmp4 = load <4 x i32>, ptr addrspace(1) %arg, align 4
  %tmp5 = extractelement <4 x i32> %tmp4, i32 0
  %tmp6 = sext i32 %tmp5 to i64
  %tmp7 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp6
  %tmp8 = load i32, ptr addrspace(1) %tmp7, align 4
  store i32 %tmp8, ptr addrspace(1) %arg, align 4
  %tmp9 = extractelement <4 x i32> %tmp4, i32 1
  %tmp10 = sext i32 %tmp9 to i64
  %tmp11 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp10
  %tmp12 = load i32, ptr addrspace(1) %tmp11, align 4
  store i32 %tmp12, ptr addrspace(1) %tmp, align 4
  %tmp13 = extractelement <4 x i32> %tmp4, i32 2
  %tmp14 = sext i32 %tmp13 to i64
  %tmp15 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp14
  %tmp16 = load i32, ptr addrspace(1) %tmp15, align 4
  store i32 %tmp16, ptr addrspace(1) %tmp1, align 4
  %tmp17 = extractelement <4 x i32> %tmp4, i32 3
  %tmp18 = sext i32 %tmp17 to i64
  %tmp19 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp18
  %tmp20 = load i32, ptr addrspace(1) %tmp19, align 4
  store i32 %tmp20, ptr addrspace(1) %tmp2, align 4
  ret void
}

; Address fed through a loop phi: a single load dominated by a compute loop,
; so neither hint is set (and no attribute group appears on the definition).
; GCN-LABEL: {{^}}test_indirect_through_phi:
; GCN: MemoryBound: 0
; GCN: WaveLimiterHint : 0
define amdgpu_kernel void @test_indirect_through_phi(ptr addrspace(1) %arg) {
; CHECK-LABEL: define amdgpu_kernel void @test_indirect_through_phi(
; CHECK-SAME: ptr addrspace(1) [[ARG:%.*]]) {
; CHECK-NEXT:  [[BB:.*]]:
; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr addrspace(1) [[ARG]], align 8
; CHECK-NEXT:    [[LOAD_F:%.*]] = bitcast float [[LOAD]] to i32
; CHECK-NEXT:    [[N:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    br label %[[BB1:.*]]
; CHECK:       [[BB1]]:
; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LOAD_F]], %[[BB]] ], [ [[AND2:%.*]], %[[BB1]] ]
; CHECK-NEXT:    [[IND:%.*]] = phi i32 [ 0, %[[BB]] ], [ [[INC2:%.*]], %[[BB1]] ]
; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[PHI]], [[N]]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[ARG]], i32 [[AND1]]
; CHECK-NEXT:    store float [[LOAD]], ptr addrspace(1) [[GEP]], align 4
; CHECK-NEXT:    [[INC1:%.*]] = add nsw i32 [[PHI]], 1310720
; CHECK-NEXT:    [[AND2]] = and i32 [[INC1]], [[N]]
; CHECK-NEXT:    [[INC2]] = add nuw nsw i32 [[IND]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[INC2]], 1024
; CHECK-NEXT:    br i1 [[CMP]], label %[[BB2:.*]], label %[[BB1]]
; CHECK:       [[BB2]]:
; CHECK-NEXT:    ret void
;
bb:
  %load = load float, ptr addrspace(1) %arg, align 8
  %load.f = bitcast float %load to i32
  %n = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:                                              ; preds = %bb1, %bb
  %phi = phi i32 [ %load.f, %bb ], [ %and2, %bb1 ]
  %ind = phi i32 [ 0, %bb ], [ %inc2, %bb1 ]
  %and1 = and i32 %phi, %n
  %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %and1
  store float %load, ptr addrspace(1) %gep, align 4
  %inc1 = add nsw i32 %phi, 1310720
  %and2 = and i32 %inc1, %n
  %inc2 = add nuw nsw i32 %ind, 1
  %cmp = icmp eq i32 %inc2, 1024
  br i1 %cmp, label %bb2, label %bb1

bb2:                                              ; preds = %bb1
  ret void
}

; Non-kernel variant of @test_membound; expected to get the memory-bound
; attribute group (#[[ATTR1]]) so kernel callers can inherit it.
define void @test_membound_func(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; CHECK-LABEL: define void @test_membound_func(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP3]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP2]]
; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr addrspace(1) [[TMP5]], align 16
; CHECK-NEXT:    [[TMP6:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP7]], align 16
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP6]]
; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr addrspace(1) [[TMP9]], align 16
; CHECK-NEXT:    ret void
;
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp3 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, ptr addrspace(1) %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp2
  store <4 x i32> %tmp4, ptr addrspace(1) %tmp5, align 16
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, ptr addrspace(1) %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp6
  store <4 x i32> %tmp8, ptr addrspace(1) %tmp9, align 16
  ret void
}

; A kernel that only calls the memory-bound function inherits both hints
; (#[[ATTR0]]).
; GCN-LABEL: {{^}}kernel_call_test_membound_func:
; GCN: MemoryBound: 1
; GCN: WaveLimiterHint : 1
define amdgpu_kernel void @kernel_call_test_membound_func(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_call_test_membound_func(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @test_membound_func(ptr addrspace(1) readonly captures(none) [[ARG]], ptr addrspace(1) captures(none) [[ARG1]])
; CHECK-NEXT:    ret void
;
  call void @test_membound_func(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1)
  ret void
}

; Indirect callee is unknown, so no hints are assumed (no attribute group).
; TODO: Probably should assume yes?
; GCN-LABEL: {{^}}kernel_indirect_call:
; GCN: MemoryBound: 0
; GCN: WaveLimiterHint : 0
define amdgpu_kernel void @kernel_indirect_call(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, ptr %fptr) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_indirect_call(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]], ptr [[FPTR:%.*]]) {
; CHECK-NEXT:    call void [[FPTR]](ptr addrspace(1) readonly captures(none) [[ARG]], ptr addrspace(1) captures(none) [[ARG1]])
; CHECK-NEXT:    ret void
;
  call void %fptr(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1)
  ret void
}

declare void @extern()

; Memory-bound body plus a call to an external function (whose body is
; unknown); still gets the memory-bound attribute group (#[[ATTR1]]).
define void @maybe_recursive_test_membound_func(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; CHECK-LABEL: define void @maybe_recursive_test_membound_func(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP3]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP2]]
; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr addrspace(1) [[TMP5]], align 16
; CHECK-NEXT:    [[TMP6:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP7]], align 16
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP6]]
; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr addrspace(1) [[TMP9]], align 16
; CHECK-NEXT:    call void @extern()
; CHECK-NEXT:    ret void
;
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp3 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, ptr addrspace(1) %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp2
  store <4 x i32> %tmp4, ptr addrspace(1) %tmp5, align 16
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, ptr addrspace(1) %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp6
  store <4 x i32> %tmp8, ptr addrspace(1) %tmp9, align 16
  call void @extern()
  ret void
}

; Kernel inherits hints through the call despite the callee's external call.
; GCN-LABEL: {{^}}kernel_call_maybe_recursive_test_membound_func:
; GCN: MemoryBound: 1
; GCN: WaveLimiterHint : 1
define amdgpu_kernel void @kernel_call_maybe_recursive_test_membound_func(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, ptr %fptr) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_call_maybe_recursive_test_membound_func(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]], ptr [[FPTR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @maybe_recursive_test_membound_func(ptr addrspace(1) readonly captures(none) [[ARG]], ptr addrspace(1) captures(none) [[ARG1]])
; CHECK-NEXT:    ret void
;
  call void @maybe_recursive_test_membound_func(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1)
  ret void
}

; NOTE(review): despite the "mutually_recursive" name, this function calls
; itself (not @mutually_recursive_test_membound_func_1) — confirm intent.
define void @mutually_recursive_test_membound_func_0(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; CHECK-LABEL: define void @mutually_recursive_test_membound_func_0(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP3]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP2]]
; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr addrspace(1) [[TMP5]], align 16
; CHECK-NEXT:    call void @mutually_recursive_test_membound_func_0(ptr addrspace(1) readonly captures(none) [[ARG]], ptr addrspace(1) captures(none) [[ARG1]])
; CHECK-NEXT:    ret void
;
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp3 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, ptr addrspace(1) %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp2
  store <4 x i32> %tmp4, ptr addrspace(1) %tmp5, align 16
  call void @mutually_recursive_test_membound_func_0(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1)
  ret void
}

; NOTE(review): like _0 above, this calls itself rather than its sibling —
; confirm the "mutually recursive" scenario is the one actually exercised.
define void @mutually_recursive_test_membound_func_1(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) {
; CHECK-LABEL: define void @mutually_recursive_test_membound_func_1(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP7]], align 16
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[ARG1]], i64 [[TMP6]]
; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr addrspace(1) [[TMP9]], align 16
; CHECK-NEXT:    call void @mutually_recursive_test_membound_func_1(ptr addrspace(1) readonly captures(none) [[ARG]], ptr addrspace(1) captures(none) [[ARG1]])
; CHECK-NEXT:    ret void
;
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, ptr addrspace(1) %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %arg1, i64 %tmp6
  store <4 x i32> %tmp8, ptr addrspace(1) %tmp9, align 16
  call void @mutually_recursive_test_membound_func_1(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1)
  ret void
}

; Hints propagate to the kernel through the call to the recursive
; memory-bound function (#[[ATTR0]]).
; GCN-LABEL: {{^}}kernel_call_mutually_recursive_test_membound_func_0:
; GCN: MemoryBound: 1
; GCN: WaveLimiterHint : 1
define amdgpu_kernel void @kernel_call_mutually_recursive_test_membound_func_0(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, ptr %fptr) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_call_mutually_recursive_test_membound_func_0(
; CHECK-SAME: ptr addrspace(1) readonly captures(none) [[ARG:%.*]], ptr addrspace(1) captures(none) [[ARG1:%.*]], ptr [[FPTR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @mutually_recursive_test_membound_func_0(ptr addrspace(1) readonly captures(none) [[ARG]], ptr addrspace(1) captures(none) [[ARG1]])
; CHECK-NEXT:    ret void
;
  call void @mutually_recursive_test_membound_func_0(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1)
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()


;.
; CHECK: attributes #[[ATTR0]] = { "amdgpu-memory-bound"="true" "amdgpu-wave-limiter"="true" }
; CHECK: attributes #[[ATTR1]] = { "amdgpu-memory-bound"="true" }
; CHECK: attributes #[[ATTR2]] = { "amdgpu-wave-limiter"="true" }
; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
;.