// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Test host codegen.
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
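// Each test case (CK1 below, CK2 further down) is compiled for OpenMP 4.5 and
// for the default OpenMP version, targeting both a 64-bit (powerpc64le) and a
// 32-bit (i386) offload device, directly and through a PCH. The -fopenmp-simd
// runs only verify that no __kmpc/__tgt runtime calls are emitted.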
#ifdef CK1
template <typename T, int X, long long Y>
struct SS{
  T a[X];
  float b;
  int foo(void) {
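    // No schedule clause: the distribute loop is statically scheduled
    // (__kmpc_for_static_init_4 with kind 92) and the inner parallel for
    // defaults to static as well (kind 34); see the CHECK lines below.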
#pragma omp target teams distribute parallel for
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
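    // schedule(static) with no chunk lowers identically to the default:
    // static scheduling (kind 34) with a unit chunk.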
#pragma omp target teams distribute parallel for schedule(static)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
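    // schedule(static, X/2): chunked static scheduling (kind 33); the chunk
    // expression folds to 61 for X == 123.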
#pragma omp target teams distribute parallel for schedule(static, X/2)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
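    // schedule(dynamic): the inner loop uses the dynamic dispatch runtime
    // (__kmpc_dispatch_init_4 / __kmpc_dispatch_next_4, kind 35).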
#pragma omp target teams distribute parallel for schedule(dynamic)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
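    // schedule(dynamic, X/2): dynamic dispatch again, with an explicit chunk.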
#pragma omp target teams distribute parallel for schedule(dynamic, X/2)
    for(int i = 0; i < X; i++) {
      a[i] = (T)0;
    }
    return a[0];
  }
};
int teams_template_struct(void) {
  SS<int, 123, 456> V;
  return V.foo();
}
#endif // CK1
// Test host codegen.
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK18
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK20
// RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK2
template <typename T, int n>
int tmain(T argc) {
  T a[n];
  int m = 10;
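  // Same five variants as CK1, but the chunk size m is a runtime value rather
  // than a foldable constant expression.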
#pragma omp target teams distribute parallel for
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for schedule(static)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for schedule(static, m)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for schedule(dynamic)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
#pragma omp target teams distribute parallel for schedule(dynamic, m)
  for(int i = 0; i < n; i++) {
    a[i] = (T)0;
  }
  return 0;
}
int main (int argc, char **argv) {
  int n = 100;
  int a[n];
  int m = 10;
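  // Note that the second and third loops below use dist_schedule on the
  // distribute directive, not schedule on the inner parallel for.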
#pragma omp target teams distribute parallel for
  for(int i = 0; i < n; i++) {
    a[i] = 0;
  }
#pragma omp target teams distribute parallel for dist_schedule(static)
  for(int i = 0; i < n; i++) {
    a[i] = 0;
  }
#pragma omp target teams distribute parallel for dist_schedule(static, m)
  for(int i = 0; i < n; i++) {
    a[i] = 0;
  }
#pragma omp target teams distribute parallel for schedule(dynamic)
  for(int i = 0; i < n; i++) {
    a[i] = 0;
  }
#pragma omp target teams distribute parallel for schedule(dynamic, m)
  for(int i = 0; i < n; i++) {
    a[i] = 0;
  }
  return tmain<int, 10>(argc);
}
#endif // CK2
#endif // #ifndef HEADER
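// All CHECK lines below are autogenerated by utils/update_cc_test_checks.py
// (see the note at the top of the file); regenerate them instead of editing
// by hand.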
// CHECK1-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK1-NEXT: ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK1-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK1-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK1-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK1: omp_offload.failed7:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK1: omp_offload.cont8:
// CHECK1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK1-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK1: omp_offload.failed14:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK1: omp_offload.cont15:
// CHECK1-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK1-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK1: omp_offload.failed21:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK1: omp_offload.cont22:
// CHECK1-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK1-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP40]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK1: omp_offload.failed28:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK1: omp_offload.cont29:
// CHECK1-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret i32 [[TMP45]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !10
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK1-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK2-NEXT: ret i32 [[CALL]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK2-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK2-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK2-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK2-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK2-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK2-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK2-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK2: omp_offload.failed7:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK2: omp_offload.cont8:
// CHECK2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK2-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP22]], align 8
// CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK2-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK2-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK2-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK2: omp_offload.failed14:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK2: omp_offload.cont15:
// CHECK2-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK2-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK2-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
// CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK2-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK2-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK2-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK2: omp_offload.failed21:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK2: omp_offload.cont22:
// CHECK2-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK2-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK2-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP40]], align 8
// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK2: omp_offload.failed28:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK2: omp_offload.cont29:
// CHECK2-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
// CHECK2-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret i32 [[TMP45]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK2-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK2-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK2-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK2-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK2-NEXT: ret void
//
//
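// The next block checks the host-side entry point for the target region at
// source line 49: it reloads 'this' and hands it straight to
// __kmpc_fork_teams together with the teams-level outlined function
// (.omp_outlined..10 below).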
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
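// Teams-level outlined body for the l49 region: the distribute bounds start
// at [0, 122] and are chunked by __kmpc_for_static_init_4 with schedule id 92
// (kmp_distribute_static in the runtime's sched_type enum); each chunk's
// 32-bit bounds are then zero-extended to i64 and forwarded to the parallel
// outlined function through __kmpc_fork_call.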
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
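// Parallel-level outlined body for the l49 region: the inherited distribute
// bounds arrive as i64, are truncated back to i32, and seed a dynamic
// dispatch loop. __kmpc_dispatch_init_4 is passed schedule id 35
// (kmp_sch_dynamic_chunked) with chunk 1 - consistent with a plain
// schedule(dynamic) clause on the source loop - and __kmpc_dispatch_next_4
// fetches chunks until it returns 0. The !llvm.access.group annotations tie
// the body's memory accesses to the loop's parallel-access metadata
// ([[LOOP11]]).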
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !10
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !10
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK2-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: ret void
//
//
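// The l54 region mirrors l49; the only difference shows up below in the
// parallel outlined body, where the dispatch chunk is 61 instead of 1.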
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK2-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
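// Parallel-level outlined body for the l54 region: same shape as
// .omp_outlined..11, but __kmpc_dispatch_init_4 is passed chunk 61 with
// schedule id 35 (kmp_sch_dynamic_chunked), consistent with a
// schedule(dynamic, 61) clause on the source loop.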
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK2-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: ret void
//
//
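// Module constructor that registers this translation unit's OpenMP
// 'requires' mask with the runtime; the i64 1 argument presumably encodes an
// empty mask (OMP_REQ_NONE), since the file declares no requires clauses.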
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//
//
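// The CHECK3 lines below repeat the same host codegen checks for a 32-bit
// configuration: pointer allocas are align 4 rather than align 8, and the
// distribute bounds are passed to the parallel outlined functions as i32, so
// the i64 zext/trunc pairs from the 64-bit checks disappear.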
// CHECK3-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK3-NEXT: [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK3-NEXT: ret i32 [[CALL]]
//
//
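// Host wrapper: for each of the five target regions it stores 'this' and the
// member array into the .offload_baseptrs/.offload_ptrs/.offload_mappers
// triples, announces the 123-iteration trip count via
// __kmpc_push_target_tripcount_mapper, launches through
// __tgt_target_teams_mapper, and falls back to the host copy of the region
// when the launch returns a nonzero status.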
// CHECK3-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK3-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK3-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK3-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK3-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK3: omp_offload.failed7:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK3: omp_offload.cont8:
// CHECK3-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK3-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK3-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK3-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK3: omp_offload.failed14:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK3: omp_offload.cont15:
// CHECK3-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK3-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK3-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK3-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK3-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK3: omp_offload.failed21:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK3: omp_offload.cont22:
// CHECK3-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK3-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK3-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP40]], align 4
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK3: omp_offload.failed28:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK3: omp_offload.cont29:
// CHECK3-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
// CHECK3-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: ret i32 [[TMP45]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
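// Parallel-level outlined body for the l36 region on the 32-bit target: the
// inherited distribute bounds are copied into .omp.lb/.omp.ub, the loop is
// scheduled by __kmpc_for_static_init_4 with schedule id 34 (kmp_sch_static),
// and the upper bound is re-clamped to the global trip count (122) before the
// inner loop runs.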
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
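// Parallel-level outlined body for the l44 region: __kmpc_for_static_init_4
// is called with schedule id 33 (kmp_sch_static_chunked) and chunk 61,
// consistent with a schedule(static, 61) clause, so the outlined function
// iterates its chunks itself through the omp.dispatch.* blocks, re-clamping
// .omp.ub to the inherited upper bound on each round.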
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
// CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK3-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK3-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !14
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !14
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !14
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK3-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK4-NEXT: ret i32 [[CALL]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK4-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK4-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK4-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK4-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK4-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK4-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK4-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK4-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK4-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK4-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK4: omp_offload.failed7:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK4: omp_offload.cont8:
// CHECK4-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK4-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP22]], align 4
// CHECK4-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK4-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK4-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK4-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK4: omp_offload.failed14:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK4: omp_offload.cont15:
// CHECK4-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK4-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK4-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
// CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK4-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK4-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK4-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK4: omp_offload.failed21:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK4: omp_offload.cont22:
// CHECK4-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK4-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK4-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP40]], align 4
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK4: omp_offload.failed28:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK4: omp_offload.cont29:
// CHECK4-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
// CHECK4-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: ret i32 [[TMP45]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
// CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK4-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK4-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !14
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !14
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !14
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK4-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK4-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK4-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK5-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK5-NEXT: ret i32 [[CALL]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK5-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK5-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK5-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK5-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK5-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK5-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK5-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK5-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK5-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK5-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK5-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK5-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK5: omp_offload.failed7:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK5: omp_offload.cont8:
// CHECK5-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK5-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK5-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK5-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK5-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP22]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK5-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK5-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK5-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK5: omp_offload.failed14:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK5: omp_offload.cont15:
// CHECK5-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK5-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK5-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK5-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK5-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK5-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK5-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK5-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK5: omp_offload.failed21:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK5: omp_offload.cont22:
// CHECK5-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK5-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK5-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
// CHECK5-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK5-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK5-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
// CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP40]], align 8
// CHECK5-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK5-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK5-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK5-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK5: omp_offload.failed28:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK5: omp_offload.cont29:
// CHECK5-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
// CHECK5-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: ret i32 [[TMP45]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK5-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK5-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK5-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK5-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK5-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK5-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK5-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK5-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK5-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK5-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK5-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !10
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !10
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK5-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK5-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK5-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK5: omp.dispatch.cond:
// CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK5: omp.dispatch.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK5: omp.dispatch.inc:
// CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK5: omp.dispatch.end:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK5-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK5-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK6-NEXT: [[CALL:%.*]] = call signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK6-NEXT: ret i32 [[CALL]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK6-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK6-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK6-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK6-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK6-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6: omp_offload.failed:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK6: omp_offload.cont:
// CHECK6-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK6-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK6-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8
// CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK6-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK6-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8
// CHECK6-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK6-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK6-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK6-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK6: omp_offload.failed7:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK6: omp_offload.cont8:
// CHECK6-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK6-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK6-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8
// CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK6-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK6-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8
// CHECK6-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP22]], align 8
// CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK6-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK6-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK6-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK6: omp_offload.failed14:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK6: omp_offload.cont15:
// CHECK6-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK6-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK6-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 8
// CHECK6-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK6-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK6-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 8
// CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP31]], align 8
// CHECK6-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK6-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK6-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK6-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK6: omp_offload.failed21:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK6: omp_offload.cont22:
// CHECK6-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK6-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK6-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 8
// CHECK6-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK6-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK6-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 8
// CHECK6-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP40]], align 8
// CHECK6-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK6-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK6-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK6-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK6: omp_offload.failed28:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK6: omp_offload.cont29:
// CHECK6-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 0
// CHECK6-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: ret i32 [[TMP45]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK6-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK6-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK6-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK6-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK6-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6: omp.dispatch.cond:
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK6-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6: omp.dispatch.body:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK6-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK6-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6: omp.dispatch.inc:
// CHECK6-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK6-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK6-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK6: omp.dispatch.end:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK6-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK6-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6: omp.dispatch.cond:
// CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6: omp.dispatch.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !10
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !10
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !10
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK6-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6: omp.dispatch.inc:
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK6: omp.dispatch.end:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK6-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK6-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK6: cond.true:
// CHECK6-NEXT: br label [[COND_END:%.*]]
// CHECK6: cond.false:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: br label [[COND_END]]
// CHECK6: cond.end:
// CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK6-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK6-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], %struct.SS* [[TMP0]])
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK6: omp.loop.exit:
// CHECK6-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK6-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK6-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK6-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK6: omp.dispatch.cond:
// CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK6: omp.dispatch.body:
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]]
// CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK6-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK6: omp.dispatch.inc:
// CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK6: omp.dispatch.end:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK6-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK6-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK7-NEXT: [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK7-NEXT: ret i32 [[CALL]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK7-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK7-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK7-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK7-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK7-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK7-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK7-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK7-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK7-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK7-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK7-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK7-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK7-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK7: omp_offload.failed7:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK7: omp_offload.cont8:
// CHECK7-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK7-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK7-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK7-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK7-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP22]], align 4
// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK7-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK7-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK7-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK7: omp_offload.failed14:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK7: omp_offload.cont15:
// CHECK7-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK7-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK7-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK7-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK7-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK7-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK7-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK7-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK7: omp_offload.failed21:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK7: omp_offload.cont22:
// CHECK7-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK7-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK7-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
// CHECK7-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK7-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK7-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
// CHECK7-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP40]], align 4
// CHECK7-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK7-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK7-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK7-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK7: omp_offload.failed28:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK7: omp_offload.cont29:
// CHECK7-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
// CHECK7-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: ret i32 [[TMP45]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
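// Note: the worker checked below is the second outlining level. The distribute
// loop above hands its chunk bounds ([[TMP8]]/[[TMP9]]) to __kmpc_fork_call,
// and the parallel-for body re-runs __kmpc_for_static_init_4 over that
// sub-range. The schedule ids are libomp sched_type values (92 appears to be
// kmp_distribute_static, 34 kmp_sch_static).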
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
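// Unlike the plain static workers above, the next outlined function checks a
// chunked static schedule (sched id 33 with chunk 61, presumably from a
// schedule(static, 61) clause): __kmpc_for_static_init_4 yields one chunk, and
// the omp.dispatch.* blocks clamp .omp.ub against the inherited upper bound
// and advance lb/ub by the stride until the range is exhausted.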
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK7: omp.dispatch.cond:
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK7: omp.dispatch.body:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK7-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
// CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK7-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK7: omp.dispatch.inc:
// CHECK7-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK7-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK7-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK7: omp.dispatch.end:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
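// The following worker is dynamically dispatched instead: the inherited bounds
// go to __kmpc_dispatch_init_4 (schedule literal 1073741859 = 0x40000023,
// which looks like kmp_sch_dynamic_chunked plus the nonmonotonic modifier
// bit), each chunk is fetched with __kmpc_dispatch_next_4, and the body
// accesses carry !llvm.access.group metadata tied to the loop info.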
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK7-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK7: omp.dispatch.cond:
// CHECK7-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK7: omp.dispatch.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK7-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK7: omp.dispatch.inc:
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK7: omp.dispatch.end:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK7-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK7: cond.true:
// CHECK7-NEXT: br label [[COND_END:%.*]]
// CHECK7: cond.false:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: br label [[COND_END]]
// CHECK7: cond.end:
// CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK7: omp.loop.exit:
// CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK7-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK7: omp.dispatch.cond:
// CHECK7-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK7: omp.dispatch.body:
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !14
// CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !14
// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !14
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK7-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK7: omp.dispatch.inc:
// CHECK7-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK7: omp.dispatch.end:
// CHECK7-NEXT: ret void
//
//
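// Module-level boilerplate: a once-per-module registration function that
// forwards the OpenMP requires flags to the offload runtime via
// __tgt_register_requires (the literal 1 appears to be the "no special
// requirements" value).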
// CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK7-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK7-NEXT: ret void
//
//
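// CHECK8 covers the i386 include-pch RUN line, so its assertions mirror the
// CHECK7 host codegen. For orientation, a minimal sketch of the kind of source
// these checks were generated from, reconstructed from the mangled name
// SS<int, 123, 456LL>::foo() and the 0..122 loop bounds (an assumption, not
// the verbatim test input):
//
//   template <typename T, int X, long long Y> struct SS {
//     T a[X];
//     int foo() {
//       #pragma omp target teams distribute parallel for
//       for (int i = 0; i < X; i++)   // five such regions (l36..l54), with
//         a[i] = 0;                   // varying schedule clauses
//       return a[0];
//     }
//   };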
// CHECK8-LABEL: define {{[^@]+}}@_Z21teams_template_structv
// CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
// CHECK8-NEXT: [[CALL:%.*]] = call i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* nonnull align 4 dereferenceable(496) [[V]])
// CHECK8-NEXT: ret i32 [[CALL]]
//
//
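// Host side of each target region: one entry per mapped variable in the
// .offload_baseptrs/.offload_ptrs/.offload_mappers triples (base = this,
// ptr = this->a), a trip count of 123 pushed through
// __kmpc_push_target_tripcount_mapper, then the __tgt_target_teams_mapper
// launch; a nonzero return branches to the omp_offload.failed host fallback.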
// CHECK8-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
// CHECK8-SAME: (%struct.SS* nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[_TMP13:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS24:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS25:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS26:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[_TMP27:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS**
// CHECK8-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]**
// CHECK8-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 123)
// CHECK8-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK8-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK8-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK8: omp_offload.failed:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK8: omp_offload.cont:
// CHECK8-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK8-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS**
// CHECK8-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK8-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]**
// CHECK8-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4
// CHECK8-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK8-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
// CHECK8-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
// CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK8-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK8-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK8-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
// CHECK8: omp_offload.failed7:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT8]]
// CHECK8: omp_offload.cont8:
// CHECK8-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK8-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS**
// CHECK8-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4
// CHECK8-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK8-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]**
// CHECK8-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4
// CHECK8-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP22]], align 4
// CHECK8-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK8-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK8-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK8-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK8-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK8: omp_offload.failed14:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK8: omp_offload.cont15:
// CHECK8-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK8-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.SS**
// CHECK8-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP28]], align 4
// CHECK8-NEXT: [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK8-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to [123 x i32]**
// CHECK8-NEXT: store [123 x i32]* [[A16]], [123 x i32]** [[TMP30]], align 4
// CHECK8-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK8-NEXT: [[TMP32:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0
// CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0
// CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK8-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49.region_id, i32 1, i8** [[TMP32]], i8** [[TMP33]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK8-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK8-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED21:%.*]], label [[OMP_OFFLOAD_CONT22:%.*]]
// CHECK8: omp_offload.failed21:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT22]]
// CHECK8: omp_offload.cont22:
// CHECK8-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK8-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to %struct.SS**
// CHECK8-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP37]], align 4
// CHECK8-NEXT: [[TMP38:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK8-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [123 x i32]**
// CHECK8-NEXT: store [123 x i32]* [[A23]], [123 x i32]** [[TMP39]], align 4
// CHECK8-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS26]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP40]], align 4
// CHECK8-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS24]], i32 0, i32 0
// CHECK8-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS25]], i32 0, i32 0
// CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 123)
// CHECK8-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54.region_id, i32 1, i8** [[TMP41]], i8** [[TMP42]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK8-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK8-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK8: omp_offload.failed28:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54(%struct.SS* [[THIS1]]) #[[ATTR2]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK8: omp_offload.cont29:
// CHECK8-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i32 0, i32 0
// CHECK8-NEXT: [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: ret i32 [[TMP45]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
// CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK8-NEXT: ret void
//
//
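// Assuming the usual libomp sched_type encoding from kmp.h
// (kmp_distribute_static = 92, kmp_sch_static = 34), the teams-level loop
// above was initialized as an unchunked static 'distribute', and the
// worksharing loop below runs as plain schedule(static), both with stride 1
// and chunk 1.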
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l40
// CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]]
// CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l44
// CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK8-NEXT: ret void
//
//
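// This variant runs the inner loop as a chunked static schedule: with the
// usual libomp encoding, the literal 33 in the __kmpc_for_static_init_4 call
// below is kmp_sch_static_chunked and 61 is the chunk size, so the loop is
// wrapped in an omp.dispatch.* round trip that advances LB/UB by the stride
// after each chunk instead of taking a single static slice.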
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61)
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK8: omp.dispatch.cond:
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK8: omp.dispatch.body:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK8-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP15:%.*]] = load i32, i32* [[I]], align 4
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP15]]
// CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK8-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK8: omp.dispatch.inc:
// CHECK8-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK8-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK8-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK8: omp.dispatch.end:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l49
// CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK8-NEXT: ret void
//
//
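// For the l49 region the inner loop is dynamically scheduled: lowering goes
// through __kmpc_dispatch_init_4/__kmpc_dispatch_next_4, and the schedule
// literal 1073741859 decodes, under the usual libomp encoding, to
// kmp_sch_dynamic_chunked (35) with the nonmonotonic modifier bit (1 << 30)
// set, chunk size 1.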
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK8-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK8: omp.dispatch.cond:
// CHECK8-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK8-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK8-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK8: omp.dispatch.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK8-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK8: omp.dispatch.inc:
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK8: omp.dispatch.end:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l54
// CHECK8-SAME: (%struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK8-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
// CHECK8-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK8: cond.true:
// CHECK8-NEXT: br label [[COND_END:%.*]]
// CHECK8: cond.false:
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: br label [[COND_END]]
// CHECK8: cond.end:
// CHECK8-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK8-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK8-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, %struct.SS*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], %struct.SS* [[TMP0]])
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK8: omp.loop.exit:
// CHECK8-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK8-NEXT: ret void
//
//
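// Same nonmonotonic dynamic dispatch as the l49 variant, but with an explicit
// chunk size of 61 passed as the final __kmpc_dispatch_init_4 argument.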
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], %struct.SS* [[THIS:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK8-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK8: omp.dispatch.cond:
// CHECK8-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK8-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK8-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK8: omp.dispatch.body:
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !14
// CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !14
// CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0
// CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !14
// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP12]]
// CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !14
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK8-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !14
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK8: omp.dispatch.inc:
// CHECK8-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK8: omp.dispatch.end:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK8-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK8-NEXT: ret void
//
//
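// The CHECK13 prefix covers a 64-bit configuration of the same test: offload
// argument arrays are 8-byte aligned, 'n' travels through an i64 (N_CASTED),
// and the VLA is materialized with llvm.stacksave plus a runtime alloca whose
// byte size (element count * 4) is recorded in .offload_sizes before the trip
// count is pushed via __kmpc_push_target_tripcount_mapper.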
// CHECK13-LABEL: define {{[^@]+}}@main
// CHECK13-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK13-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
// CHECK13-NEXT: [[_TMP9:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
// CHECK13-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
// CHECK13-NEXT: [[_TMP41:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
// CHECK13-NEXT: [[_TMP59:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK13-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK13-NEXT: store i32 100, i32* [[N]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK13-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT: store i32 10, i32* [[M]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
// CHECK13-NEXT: store i64 [[TMP4]], i64* [[TMP7]], align 8
// CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
// CHECK13-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8
// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK13-NEXT: store i64 4, i64* [[TMP10]], align 8
// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK13-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8
// CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK13-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8
// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK13-NEXT: store i64 8, i64* [[TMP16]], align 8
// CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP17]], align 8
// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK13-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8
// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK13-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8
// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK13-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8
// CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK13-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK13-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK13-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK13-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK13: omp_offload.failed:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK13: omp_offload.cont:
// CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
// CHECK13-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4
// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
// CHECK13-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK13-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8
// CHECK13-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK13-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8
// CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK13-NEXT: store i64 4, i64* [[TMP40]], align 8
// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP41]], align 8
// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8
// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8
// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
// CHECK13-NEXT: store i64 8, i64* [[TMP46]], align 8
// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP47]], align 8
// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
// CHECK13-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8
// CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
// CHECK13-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8
// CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
// CHECK13-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8
// CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
// CHECK13-NEXT: store i8* null, i8** [[TMP53]], align 8
// CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
// CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
// CHECK13-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
// CHECK13-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
// CHECK13-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
// CHECK13-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
// CHECK13-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK13: omp_offload.failed16:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK13: omp_offload.cont17:
// CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4
// CHECK13-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK13-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
// CHECK13-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4
// CHECK13-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
// CHECK13-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK13-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4
// CHECK13-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK13-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
// CHECK13-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8
// CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK13-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK13-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8
// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK13-NEXT: store i64 4, i64* [[TMP73]], align 8
// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP74]], align 8
// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK13-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8
// CHECK13-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK13-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8
// CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
// CHECK13-NEXT: store i64 8, i64* [[TMP79]], align 8
// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP80]], align 8
// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
// CHECK13-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8
// CHECK13-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
// CHECK13-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8
// CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
// CHECK13-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8
// CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
// CHECK13-NEXT: store i8* null, i8** [[TMP86]], align 8
// CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
// CHECK13-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8
// CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
// CHECK13-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8
// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
// CHECK13-NEXT: store i64 4, i64* [[TMP91]], align 8
// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
// CHECK13-NEXT: store i8* null, i8** [[TMP92]], align 8
// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK13-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
// CHECK13-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
// CHECK13-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
// CHECK13-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK13-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
// CHECK13-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
// CHECK13-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
// CHECK13-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
// CHECK13: omp_offload.failed33:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT34]]
// CHECK13: omp_offload.cont34:
// CHECK13-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
// CHECK13-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4
// CHECK13-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
// CHECK13-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK13-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8
// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK13-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8
// CHECK13-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK13-NEXT: store i64 4, i64* [[TMP109]], align 8
// CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP110]], align 8
// CHECK13-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
// CHECK13-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8
// CHECK13-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
// CHECK13-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8
// CHECK13-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
// CHECK13-NEXT: store i64 8, i64* [[TMP115]], align 8
// CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP116]], align 8
// CHECK13-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
// CHECK13-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8
// CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
// CHECK13-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8
// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
// CHECK13-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8
// CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
// CHECK13-NEXT: store i8* null, i8** [[TMP122]], align 8
// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK13-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK13-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
// CHECK13-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
// CHECK13-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK13-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK13-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
// CHECK13-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
// CHECK13-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
// CHECK13-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
// CHECK13: omp_offload.failed48:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT49]]
// CHECK13: omp_offload.cont49:
// CHECK13-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4
// CHECK13-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK13-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
// CHECK13-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4
// CHECK13-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
// CHECK13-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK13-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
// CHECK13-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4
// CHECK13-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
// CHECK13-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK13-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8
// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK13-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8
// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK13-NEXT: store i64 4, i64* [[TMP142]], align 8
// CHECK13-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP143]], align 8
// CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
// CHECK13-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8
// CHECK13-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
// CHECK13-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8
// CHECK13-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
// CHECK13-NEXT: store i64 8, i64* [[TMP148]], align 8
// CHECK13-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP149]], align 8
// CHECK13-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
// CHECK13-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8
// CHECK13-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
// CHECK13-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8
// CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
// CHECK13-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8
// CHECK13-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
// CHECK13-NEXT: store i8* null, i8** [[TMP155]], align 8
// CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
// CHECK13-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
// CHECK13-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8
// CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
// CHECK13-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
// CHECK13-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8
// CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
// CHECK13-NEXT: store i64 4, i64* [[TMP160]], align 8
// CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
// CHECK13-NEXT: store i8* null, i8** [[TMP161]], align 8
// CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK13-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK13-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK13-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4
// CHECK13-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK13-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK13-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
// CHECK13-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
// CHECK13-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
// CHECK13-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK13-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK13-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
// CHECK13-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
// CHECK13-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
// CHECK13-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
// CHECK13: omp_offload.failed66:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT67]]
// CHECK13: omp_offload.cont67:
// CHECK13-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK13-NEXT: [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
// CHECK13-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK13-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP172]])
// CHECK13-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK13-NEXT: ret i32 [[TMP173]]
//
//
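// NOTE: The remaining CHECK13 blocks verify the host-side entry points and
// outlined bodies for each offloaded region in turn. As a hedged sketch only
// (the C++ source lives elsewhere in this file and is not repeated here), a
// 3-argument kernel of the shape checked below -- (i64 n, i64 vla-size,
// i32* a) -- is what a combined construct along these lines produces:
//
//   #pragma omp target teams distribute parallel for
//   for (int i = 0; i < n; ++i)
//     a[i] = 0;
//
// The exact clauses on each region are an assumption; only the argument
// shapes and runtime calls asserted below are authoritative.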
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK13-NEXT: ret void
//
//
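// The teams-level outlined function checked below implements the distribute
// loop: it recomputes the trip count from n (the sub/sdiv/sub sequence is
// ((n - 0) / 1) - 1 for the loop upper bound, plus 1 for the count, i.e. n
// iterations of 0 <= i < n), guards the zero-trip case via omp.precond,
// initializes the schedule with __kmpc_for_static_init_4 using schedule kind
// 92 (unchunked static distribute -- a runtime constant, stated here as
// background), and forks the parallel region once per distribute chunk,
// passing the chunk bounds zero-extended to i64.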
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
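// The worker outlined function checked below receives the distribute chunk
// as .previous.lb./.previous.ub. (i64), truncates both to i32 for its own
// lb/ub, and runs __kmpc_for_static_init_4 with schedule kind 34 (static,
// no chunk -- a runtime constant, stated here as background) over that
// subrange. The body computes i = 0 + iv*1 and stores 0 to a[i], matching
// the sketch above.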
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK13-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK13-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK13-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
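// The l143 region and its outlined functions below mirror the l139 set
// line-for-line (same 3-argument kernel, same schedule kinds 92/34);
// presumably a clause variant that lowers identically, though which clause
// differs is not visible from the checks alone.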
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK13-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK13-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK13-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
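// The l147 kernel checked below takes a fourth i64 argument carrying a
// captured scalar (stored from M in the caller), consistent with a chunk
// expression such as dist_schedule(static, m) -- an assumption; the checks
// themselves only establish that the value is captured, forwarded through
// __kmpc_fork_teams, and re-materialized in the outlined function.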
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK13-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK13-NEXT: ret void
//
//
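// The outlined function below differs from .omp_outlined. in two ways the
// checks pin down: __kmpc_for_static_init_4 is called with schedule kind 91
// (chunked static distribute -- a runtime constant, stated here as
// background) and the captured chunk value as its final argument, and the
// distribute loop is strided: after each fork, comb.lb and comb.ub are
// advanced by the stride and comb.ub is re-clamped to the loop upper bound.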
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK13-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK13-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
// CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP20]], i32* [[CONV8]], align 4
// CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK13-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4
// CHECK13-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK13-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK13-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK13-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
// CHECK13-NEXT: br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
// CHECK13: cond.true14:
// CHECK13-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: br label [[COND_END16:%.*]]
// CHECK13: cond.false15:
// CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END16]]
// CHECK13: cond.end16:
// CHECK13-NEXT: [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
// CHECK13-NEXT: store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
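// The checks below cover the inner half of a combined `distribute parallel
// for`: the distribute loop above carves team chunks with
// __kmpc_for_static_init_4(..., 91 /*distribute static chunked*/, chunk) and
// hands each chunk to @.omp_outlined..6 through the .previous.lb./.previous.ub.
// i64 parameters, which are truncated back to i32 and re-normalized with
// schedule kind 34 (static). A minimal sketch, assuming a loop of the shape
// exercised by the target regions in main() (illustrative only, not the
// literal source of this test):
//
//   #pragma omp target teams distribute parallel for dist_schedule(static, ch)
//   for (int i = 0; i < n; i++)
//     a[i] = 0;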
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK13-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK13-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK13-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I7]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[I7]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK13-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
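// Offload entry for the target region at main_l151: the host-side stub spills
// the by-value i64 captures (n and the VLA extent) and the array pointer to
// allocas, repacks n through N_CASTED, and launches the teams region via
// __kmpc_fork_teams with @.omp_outlined..8 as the outlined body.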
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
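// @.omp_outlined..9 is the parallel-for worker for main_l151. Unlike the
// statically scheduled workers above, it goes through the dispatch API:
// __kmpc_dispatch_init_4 with schedule kind 35 (dynamic chunked, chunk 1),
// then a polling loop around __kmpc_dispatch_next_4. The body's loads and
// stores carry !llvm.access.group metadata, which the trailing !llvm.loop
// annotation references (typically via llvm.loop.parallel_accesses).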
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK13-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK13-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK13: omp.dispatch.cond:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK13: omp.dispatch.body:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK13-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK13: omp.dispatch.inc:
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK13: omp.dispatch.end:
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
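// Offload entry for the target region at main_l155. Compared with main_l151
// it forwards one extra by-value i64, the captured schedule chunk
// (DOTCAPTURE_EXPR_), so __kmpc_fork_teams receives four captures and the
// chunk can reach the dispatch initialization in @.omp_outlined..12.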
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK13-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK13-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV8]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK13-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4
// CHECK13-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
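// @.omp_outlined..12 mirrors @.omp_outlined..9, except the dispatch chunk is
// no longer the literal 1: the captured chunk expression travels as an i64,
// is reloaded through the i32* bitcast of its alloca, and lands as the final
// argument of __kmpc_dispatch_init_4 (still schedule kind 35).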
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK13-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK13-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 0, i32* [[I]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK13: omp.precond.then:
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK13-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK13-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK13: omp.dispatch.cond:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK13-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK13: omp.dispatch.body:
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK13-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK13: omp.dispatch.inc:
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK13: omp.dispatch.end:
// CHECK13-NEXT: br label [[OMP_PRECOND_END]]
// CHECK13: omp.precond.end:
// CHECK13-NEXT: ret void
//
//
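// Host-side checks for the template instantiation _Z5tmainIiLi10EEiT_ (int,
// bound 10). Each of its five target regions repeats the same mapping
// protocol: fill parallel .offload_baseptrs/.offload_ptrs/.offload_mappers
// arrays, push the constant trip count of 10 with
// __kmpc_push_target_tripcount_mapper, call __tgt_target_teams_mapper, and
// fall back to calling the outlined region directly on the host when the
// return value is nonzero. A condensed sketch of that per-region protocol,
// assuming a single array capture (illustrative pseudo-C++, not the exact
// generated code):
//
//   void *base[1] = { a }, *ptrs[1] = { a }, *maps[1] = { nullptr };
//   __kmpc_push_target_tripcount_mapper(loc, /*device=*/-1, /*tripcount=*/10);
//   if (__tgt_target_teams_mapper(loc, -1, region_id, 1, base, ptrs,
//                                 sizes, maptypes, nullptr, nullptr, 0, 0))
//     fallback_host_version(a); // offload failed, run on the host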
// CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK13-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK13-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK13-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
// CHECK13-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
// CHECK13-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK13-NEXT: store i32 10, i32* [[M]], align 4
// CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK13-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK13: omp_offload.failed:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK13: omp_offload.cont:
// CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK13-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK13-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK13-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK13: omp_offload.failed5:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK13: omp_offload.cont6:
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK13-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK13-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
// CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK13-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
// CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK13-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK13-NEXT: store i64 [[TMP20]], i64* [[TMP27]], align 8
// CHECK13-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK13-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK13-NEXT: store i64 [[TMP20]], i64* [[TMP29]], align 8
// CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK13-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK13-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK13: omp_offload.failed11:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK13: omp_offload.cont12:
// CHECK13-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK13-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
// CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK13-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
// CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK13-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK13-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK13: omp_offload.failed17:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK13: omp_offload.cont18:
// CHECK13-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK13-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK13-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK13-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
// CHECK13-NEXT: store i32 [[TMP45]], i32* [[CONV21]], align 4
// CHECK13-NEXT: [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK13-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
// CHECK13-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK13-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
// CHECK13-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK13-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK13-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK13-NEXT: store i64 [[TMP46]], i64* [[TMP53]], align 8
// CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK13-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK13-NEXT: store i64 [[TMP46]], i64* [[TMP55]], align 8
// CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK13-NEXT: store i8* null, i8** [[TMP56]], align 8
// CHECK13-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK13-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK13-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK13-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
// CHECK13: omp_offload.failed26:
// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT27]]
// CHECK13: omp_offload.cont27:
// CHECK13-NEXT: ret i32 0
//
//
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK13-NEXT: ret void
//
//
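// @.omp_outlined..14 is the teams-distribute body for the tmain l112 region.
// With the constant bound 10 the zero-trip precondition check disappears:
// the combined UB is clamped directly against the literal 9 before each
// chunk is forwarded to @.omp_outlined..15 via __kmpc_fork_call.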
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
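// Annotation: inner parallel-for outlined function for the _l112 region. It
// receives the team's distribute bounds as i64 values, truncates them back to
// the i32 iteration space, and runs a worksharing loop initialized with
// schedule constant 34 (kmp_sch_static, i.e. unchunked static).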
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK13-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK13-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK13-NEXT: ret void
//
//
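// Annotation: the _l116 region and its outlined bodies (..17/..18) mirror the
// _l112 chain above: kmp_distribute_static (92) at the teams level and an
// unchunked static worksharing loop (34) inside the parallel region.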
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK13-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK13-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK13-NEXT: ret void
//
//
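// Annotation: the _l120 region additionally forwards a captured chunk size by
// value (the i64 [[DOTCAPTURE_EXPR_]] parameter), which is consistent with a
// hypothetical schedule clause such as:
//
//   #pragma omp target teams distribute parallel for schedule(static, m)
//
// where m stands for whatever expression was captured at the directive.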
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
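// Annotation: inner loop for the _l120 region. __kmpc_for_static_init_4 is
// called with 33 (kmp_sch_static_chunked) and the captured chunk in [[TMP3]];
// the surrounding omp.dispatch.cond/omp.dispatch.inc loop then advances
// .omp.lb/.omp.ub by the returned stride to walk the thread's successive
// chunks, re-clamping the upper bound against the distribute bound each round.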
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK13-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK13: omp.dispatch.cond:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK13: omp.dispatch.body:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK13-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK13: omp.dispatch.inc:
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK13-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK13-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK13: omp.dispatch.end:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK13-NEXT: ret void
//
//
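// Annotation: the _l124 region switches the inner loop to the dynamic dispatch
// protocol: __kmpc_dispatch_init_4 with schedule constant 35
// (kmp_sch_dynamic_chunked) and chunk 1, followed by a __kmpc_dispatch_next_4
// polling loop. That is the expected lowering for a hypothetical
// schedule(dynamic) clause with no explicit chunk.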
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK13-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK13-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK13: omp.dispatch.cond:
// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK13: omp.dispatch.body:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK13-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK13: omp.dispatch.inc:
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK13: omp.dispatch.end:
// CHECK13-NEXT: ret void
//
//
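// Annotation: the _l128 region is the chunked-dynamic variant: the captured
// chunk travels by value into the outlined bodies and becomes the last
// argument of __kmpc_dispatch_init_4 (still schedule constant 35), consistent
// with a hypothetical schedule(dynamic, m) clause.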
// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK13-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK13-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK13: cond.true:
// CHECK13-NEXT: br label [[COND_END:%.*]]
// CHECK13: cond.false:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: br label [[COND_END]]
// CHECK13: cond.end:
// CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK13-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK13: omp.loop.exit:
// CHECK13-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK13-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK13-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK13-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK13: omp.dispatch.cond:
// CHECK13-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK13: omp.dispatch.body:
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK13-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK13: omp.dispatch.inc:
// CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK13: omp.dispatch.end:
// CHECK13-NEXT: ret void
//
//
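// Annotation: this registration function is emitted once per translation unit
// that uses offloading; the argument 1 to __tgt_register_requires encodes
// OMP_REQ_NONE in libomptarget's flag enum, i.e. no "requires" clauses were
// present in this file.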
// CHECK13-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK13-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK13-NEXT: ret void
//
//
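// Annotation: the CHECK14 assertions below re-verify the same host codegen for
// main under the companion RUN configuration (typically the PCH round trip on
// the same 64-bit triple); the IR is expected to match the CHECK13 sequence
// modulo value numbering.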
// CHECK14-LABEL: define {{[^@]+}}@main
// CHECK14-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK14-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
// CHECK14-NEXT: [[_TMP9:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
// CHECK14-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
// CHECK14-NEXT: [[_TMP41:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
// CHECK14-NEXT: [[_TMP59:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK14-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK14-NEXT: store i32 100, i32* [[N]], align 4
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK14-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK14-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK14-NEXT: store i32 10, i32* [[M]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
// CHECK14-NEXT: store i64 [[TMP4]], i64* [[TMP7]], align 8
// CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
// CHECK14-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8
// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK14-NEXT: store i64 4, i64* [[TMP10]], align 8
// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK14-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8
// CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK14-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8
// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK14-NEXT: store i64 8, i64* [[TMP16]], align 8
// CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP17]], align 8
// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK14-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8
// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK14-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8
// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK14-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8
// CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK14-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK14-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK14-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK14-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK14: omp_offload.failed:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK14: omp_offload.cont:
// CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
// CHECK14-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4
// CHECK14-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
// CHECK14-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK14-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8
// CHECK14-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK14-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8
// CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK14-NEXT: store i64 4, i64* [[TMP40]], align 8
// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP41]], align 8
// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8
// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8
// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
// CHECK14-NEXT: store i64 8, i64* [[TMP46]], align 8
// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP47]], align 8
// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
// CHECK14-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8
// CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
// CHECK14-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8
// CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
// CHECK14-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8
// CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
// CHECK14-NEXT: store i8* null, i8** [[TMP53]], align 8
// CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK14-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK14-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
// CHECK14-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
// CHECK14-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
// CHECK14-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK14-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK14-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
// CHECK14-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
// CHECK14-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
// CHECK14-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK14: omp_offload.failed16:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK14: omp_offload.cont17:
// CHECK14-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4
// CHECK14-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK14-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
// CHECK14-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4
// CHECK14-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
// CHECK14-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK14-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4
// CHECK14-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK14-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
// CHECK14-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8
// CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK14-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK14-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8
// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK14-NEXT: store i64 4, i64* [[TMP73]], align 8
// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP74]], align 8
// CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK14-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8
// CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK14-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8
// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
// CHECK14-NEXT: store i64 8, i64* [[TMP79]], align 8
// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP80]], align 8
// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
// CHECK14-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8
// CHECK14-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
// CHECK14-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8
// CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
// CHECK14-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8
// CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
// CHECK14-NEXT: store i8* null, i8** [[TMP86]], align 8
// CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
// CHECK14-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8
// CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
// CHECK14-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8
// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
// CHECK14-NEXT: store i64 4, i64* [[TMP91]], align 8
// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
// CHECK14-NEXT: store i8* null, i8** [[TMP92]], align 8
// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK14-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK14-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK14-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
// CHECK14-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
// CHECK14-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
// CHECK14-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK14-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK14-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
// CHECK14-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
// CHECK14-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
// CHECK14-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
// CHECK14: omp_offload.failed33:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT34]]
// CHECK14: omp_offload.cont34:
// CHECK14-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
// CHECK14-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4
// CHECK14-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
// CHECK14-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK14-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8
// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK14-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8
// CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK14-NEXT: store i64 4, i64* [[TMP109]], align 8
// CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP110]], align 8
// CHECK14-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
// CHECK14-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8
// CHECK14-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
// CHECK14-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8
// CHECK14-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
// CHECK14-NEXT: store i64 8, i64* [[TMP115]], align 8
// CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP116]], align 8
// CHECK14-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
// CHECK14-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8
// CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
// CHECK14-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8
// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
// CHECK14-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8
// CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
// CHECK14-NEXT: store i8* null, i8** [[TMP122]], align 8
// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK14-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK14-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK14-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK14-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
// CHECK14-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
// CHECK14-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK14-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK14-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
// CHECK14-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
// CHECK14-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
// CHECK14-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
// CHECK14: omp_offload.failed48:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT49]]
// CHECK14: omp_offload.cont49:
// CHECK14-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4
// CHECK14-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK14-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
// CHECK14-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4
// CHECK14-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
// CHECK14-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK14-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
// CHECK14-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4
// CHECK14-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
// CHECK14-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK14-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8
// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK14-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8
// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK14-NEXT: store i64 4, i64* [[TMP142]], align 8
// CHECK14-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP143]], align 8
// CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
// CHECK14-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8
// CHECK14-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
// CHECK14-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8
// CHECK14-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
// CHECK14-NEXT: store i64 8, i64* [[TMP148]], align 8
// CHECK14-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP149]], align 8
// CHECK14-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
// CHECK14-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8
// CHECK14-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
// CHECK14-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8
// CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
// CHECK14-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8
// CHECK14-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
// CHECK14-NEXT: store i8* null, i8** [[TMP155]], align 8
// CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
// CHECK14-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
// CHECK14-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8
// CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
// CHECK14-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
// CHECK14-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8
// CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
// CHECK14-NEXT: store i64 4, i64* [[TMP160]], align 8
// CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
// CHECK14-NEXT: store i8* null, i8** [[TMP161]], align 8
// CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK14-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK14-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK14-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4
// CHECK14-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK14-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK14-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
// CHECK14-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
// CHECK14-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
// CHECK14-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK14-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK14-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
// CHECK14-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
// CHECK14-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
// CHECK14-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
// CHECK14: omp_offload.failed66:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT67]]
// CHECK14: omp_offload.cont67:
// CHECK14-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK14-NEXT: [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
// CHECK14-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK14-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP172]])
// CHECK14-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK14-NEXT: ret i32 [[TMP173]]
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK14-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK14-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK14-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK14-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK14-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK14-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK14-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK14-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK14-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
// CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP20]], i32* [[CONV8]], align 4
// CHECK14-NEXT: [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK14-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4
// CHECK14-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK14-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK14-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK14-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
// CHECK14-NEXT: br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
// CHECK14: cond.true14:
// CHECK14-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: br label [[COND_END16:%.*]]
// CHECK14: cond.false15:
// CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END16]]
// CHECK14: cond.end16:
// CHECK14-NEXT: [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
// CHECK14-NEXT: store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK14-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK14-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I7]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[I7]], align 4
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK14-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK14-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK14-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK14-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV8]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK14-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4
// CHECK14-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK14-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK14-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK14-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 0, i32* [[I]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK14: omp.precond.then:
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK14-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK14-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK14-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: br label [[OMP_PRECOND_END]]
// CHECK14: omp.precond.end:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK14-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK14-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK14-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
// CHECK14-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
// CHECK14-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK14-NEXT: store i32 10, i32* [[M]], align 4
// CHECK14-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK14-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK14: omp_offload.failed:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK14: omp_offload.cont:
// CHECK14-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK14-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK14-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
// CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK14-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK14: omp_offload.failed5:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK14: omp_offload.cont6:
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK14-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK14-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
// CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK14-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
// CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK14-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK14-NEXT: store i64 [[TMP20]], i64* [[TMP27]], align 8
// CHECK14-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK14-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK14-NEXT: store i64 [[TMP20]], i64* [[TMP29]], align 8
// CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK14-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK14-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK14: omp_offload.failed11:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK14: omp_offload.cont12:
// CHECK14-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK14-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
// CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK14-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
// CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK14-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK14-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK14: omp_offload.failed17:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK14: omp_offload.cont18:
// CHECK14-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK14-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK14-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK14-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
// CHECK14-NEXT: store i32 [[TMP45]], i32* [[CONV21]], align 4
// CHECK14-NEXT: [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK14-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
// CHECK14-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK14-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
// CHECK14-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK14-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK14-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK14-NEXT: store i64 [[TMP46]], i64* [[TMP53]], align 8
// CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK14-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK14-NEXT: store i64 [[TMP46]], i64* [[TMP55]], align 8
// CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK14-NEXT: store i8* null, i8** [[TMP56]], align 8
// CHECK14-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK14-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK14-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK14-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
// CHECK14: omp_offload.failed26:
// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT27]]
// CHECK14: omp_offload.cont27:
// CHECK14-NEXT: ret i32 0
//
//
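// Offload entry point for the l112 target region (this is the function the
// omp_offload.failed paths above call as the host fallback): it reloads the
// mapped array pointer and launches the teams construct via __kmpc_fork_teams,
// passing .omp_outlined..14 as the teams body.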
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK14-NEXT: ret void
//
//
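// .omp_outlined..14 is the teams body: a `distribute` loop over 10 iterations.
// Schedule type 92 passed to __kmpc_for_static_init_4 is kmp_distribute_static;
// the upper bound is clamped to 9, and each team's chunk bounds are forwarded
// (zero-extended to i64) to the nested parallel region .omp_outlined..15
// through __kmpc_fork_call.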
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
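// .omp_outlined..15 is the nested `parallel for` body: the distribute bounds
// arrive as i64 and are truncated back to i32, the loop is scheduled with
// type 34 (kmp_sch_static, no chunk), and the body zeroes a[i].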
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK14-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK14-NEXT: ret void
//
//
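// Offload entry for the l116 region; structurally identical to the l112 kernel
// apart from the outlined-function names.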
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK14-NEXT: ret void
//
//
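// .omp_outlined..17 mirrors .omp_outlined..14 (distribute, schedule 92); only
// the name of the forked parallel body (.omp_outlined..18) differs.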
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
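// .omp_outlined..18 mirrors .omp_outlined..15 (static worksharing, type 34).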
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK14-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK14-NEXT: ret void
//
//
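// Offload entry for the l120 region: besides the array it receives a captured
// i32 packed into an i64 (the usual by-value convention for scalar target
// arguments), unpacks it through a bitcast of the i64 slot, and forwards it to
// the teams body .omp_outlined..21.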
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
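// .omp_outlined..21: a distribute loop (schedule 92) that re-packs the
// captured value into an i64 on every chunk before forking .omp_outlined..22.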
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
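// .omp_outlined..22: here __kmpc_for_static_init_4 is called with schedule
// type 33 (kmp_sch_static_chunked) and the runtime chunk [[TMP3]], consistent
// with a schedule(static, <expr>) clause on the source loop. Chunked static
// scheduling requires the omp.dispatch.* outer loop, which advances LB/UB by
// the returned stride until the chunk moves past the enclosing distribute
// upper bound.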
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK14-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK14-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK14-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK14-NEXT: ret void
//
//
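// Offload entry for the l124 region; same shape as the l112/l116 kernels.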
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK14-NEXT: ret void
//
//
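// .omp_outlined..25 repeats the distribute pattern of ..14/..17 (schedule 92).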
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
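// .omp_outlined..26: a dynamically scheduled worksharing loop. Instead of the
// static init/fini pair it calls __kmpc_dispatch_init_4 with schedule type 35
// (kmp_sch_dynamic_chunked, chunk 1) and then loops on __kmpc_dispatch_next_4,
// which returns nonzero while chunks remain. The body accesses carry
// !llvm.access.group metadata and the back-edge an !llvm.loop annotation,
// apparently so later passes may treat the iterations as parallel and
// vectorize them.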
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK14-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK14-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: ret void
//
//
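// Offload entry for the l128 region: like the l120 kernel, but the captured
// value ends up as the dynamic chunk size (see .omp_outlined..30).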
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK14-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
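// .omp_outlined..29 mirrors .omp_outlined..21, forking .omp_outlined..30.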
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: ret void
//
//
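// .omp_outlined..30: __kmpc_dispatch_init_4 again uses schedule type 35, now
// with the runtime chunk [[TMP3]] unpacked from the captured i64, consistent
// with a schedule(dynamic, <expr>) clause on the source loop.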
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK14-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !24
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK14-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: ret void
//
//
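// Constructor emitted once per translation unit to register `requires` clauses
// with the offload runtime; the argument 1 appears to encode OMP_REQ_NONE.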
// CHECK14-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK14-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK14-NEXT: ret void
//
//
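// CHECK15 begins the 32-bit (i386) host-codegen variant of main: pointer slots
// are 4-byte aligned, scalars such as N_CASTED are passed as plain i32 rather
// than being packed into i64, and the VLA byte size is computed in i32.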
// CHECK15-LABEL: define {{[^@]+}}@main
// CHECK15-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4
// CHECK15-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
// CHECK15-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
// CHECK15-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
// CHECK15-NEXT: [[_TMP37:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
// CHECK15-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK15-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
// CHECK15-NEXT: store i32 100, i32* [[N]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK15-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK15-NEXT: store i32 10, i32* [[M]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK15-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
// CHECK15-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
// CHECK15-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
// CHECK15-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK15-NEXT: store i64 4, i64* [[TMP10]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK15-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK15-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK15-NEXT: store i64 4, i64* [[TMP16]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK15-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK15-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4
// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK15-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4
// CHECK15-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK15-NEXT: store i8* null, i8** [[TMP23]], align 4
// CHECK15-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK15-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK15-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK15-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK15: omp_offload.failed:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK15: omp_offload.cont:
// CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
// CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
// CHECK15-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK15-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK15-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4
// CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK15-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4
// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK15-NEXT: store i64 4, i64* [[TMP41]], align 4
// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP42]], align 4
// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4
// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4
// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
// CHECK15-NEXT: store i64 4, i64* [[TMP47]], align 4
// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP48]], align 4
// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
// CHECK15-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4
// CHECK15-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
// CHECK15-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4
// CHECK15-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
// CHECK15-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4
// CHECK15-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
// CHECK15-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK15-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
// CHECK15-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
// CHECK15-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
// CHECK15-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
// CHECK15-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
// CHECK15-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
// CHECK15-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
// CHECK15: omp_offload.failed15:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT16]]
// CHECK15: omp_offload.cont16:
// CHECK15-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4
// CHECK15-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK15-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
// CHECK15-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
// CHECK15-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK15-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK15-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
// CHECK15-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK15-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
// CHECK15-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4
// CHECK15-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK15-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
// CHECK15-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4
// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK15-NEXT: store i64 4, i64* [[TMP75]], align 4
// CHECK15-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP76]], align 4
// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
// CHECK15-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4
// CHECK15-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
// CHECK15-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4
// CHECK15-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
// CHECK15-NEXT: store i64 4, i64* [[TMP81]], align 4
// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP82]], align 4
// CHECK15-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
// CHECK15-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4
// CHECK15-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
// CHECK15-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4
// CHECK15-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
// CHECK15-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4
// CHECK15-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
// CHECK15-NEXT: store i8* null, i8** [[TMP88]], align 4
// CHECK15-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
// CHECK15-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK15-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4
// CHECK15-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
// CHECK15-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
// CHECK15-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4
// CHECK15-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
// CHECK15-NEXT: store i64 4, i64* [[TMP93]], align 4
// CHECK15-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
// CHECK15-NEXT: store i8* null, i8** [[TMP94]], align 4
// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK15-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK15-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK15-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK15-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK15-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
// CHECK15-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
// CHECK15-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
// CHECK15-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK15-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
// CHECK15-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
// CHECK15-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
// CHECK15-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
// CHECK15: omp_offload.failed30:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT31]]
// CHECK15: omp_offload.cont31:
// CHECK15-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
// CHECK15-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
// CHECK15-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK15-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
// CHECK15-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK15-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
// CHECK15-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4
// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK15-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
// CHECK15-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4
// CHECK15-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK15-NEXT: store i64 4, i64* [[TMP112]], align 4
// CHECK15-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP113]], align 4
// CHECK15-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
// CHECK15-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4
// CHECK15-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
// CHECK15-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4
// CHECK15-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
// CHECK15-NEXT: store i64 4, i64* [[TMP118]], align 4
// CHECK15-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP119]], align 4
// CHECK15-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
// CHECK15-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4
// CHECK15-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
// CHECK15-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4
// CHECK15-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
// CHECK15-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4
// CHECK15-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
// CHECK15-NEXT: store i8* null, i8** [[TMP125]], align 4
// CHECK15-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK15-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK15-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK15-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK15-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
// CHECK15-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
// CHECK15-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
// CHECK15-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK15-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
// CHECK15-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
// CHECK15-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
// CHECK15-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
// CHECK15: omp_offload.failed44:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT45]]
// CHECK15: omp_offload.cont45:
// CHECK15-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4
// CHECK15-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK15-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
// CHECK15-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
// CHECK15-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK15-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK15-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK15-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK15-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK15-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4
// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK15-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4
// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK15-NEXT: store i64 4, i64* [[TMP146]], align 4
// CHECK15-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP147]], align 4
// CHECK15-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
// CHECK15-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4
// CHECK15-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
// CHECK15-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4
// CHECK15-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
// CHECK15-NEXT: store i64 4, i64* [[TMP152]], align 4
// CHECK15-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP153]], align 4
// CHECK15-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
// CHECK15-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4
// CHECK15-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
// CHECK15-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4
// CHECK15-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
// CHECK15-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4
// CHECK15-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
// CHECK15-NEXT: store i8* null, i8** [[TMP159]], align 4
// CHECK15-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
// CHECK15-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
// CHECK15-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4
// CHECK15-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
// CHECK15-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
// CHECK15-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4
// CHECK15-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
// CHECK15-NEXT: store i64 4, i64* [[TMP164]], align 4
// CHECK15-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
// CHECK15-NEXT: store i8* null, i8** [[TMP165]], align 4
// CHECK15-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK15-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK15-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK15-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4
// CHECK15-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK15-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK15-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
// CHECK15-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
// CHECK15-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
// CHECK15-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK15-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK15-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
// CHECK15-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
// CHECK15-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
// CHECK15-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
// CHECK15: omp_offload.failed60:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT61]]
// CHECK15: omp_offload.cont61:
// CHECK15-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK15-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
// CHECK15-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK15-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP176]])
// CHECK15-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK15-NEXT: ret i32 [[TMP177]]
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK15-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK15-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK15-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK15-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP18]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK15-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK15-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK15-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
// CHECK15-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK15: cond.true11:
// CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: br label [[COND_END13:%.*]]
// CHECK15: cond.false12:
// CHECK15-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END13]]
// CHECK15: cond.end13:
// CHECK15-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
// CHECK15-NEXT: store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK15-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I4]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[I4]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK15-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK15-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK15: omp.dispatch.cond:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK15: omp.dispatch.body:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK15-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK15-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK15: omp.dispatch.inc:
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK15: omp.dispatch.end:
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK15-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK15-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK15-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK15-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 0, i32* [[I]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK15: omp.precond.then:
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK15-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK15: omp.dispatch.cond:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK15-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK15: omp.dispatch.body:
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK15-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK15-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK15: omp.dispatch.inc:
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK15: omp.dispatch.end:
// CHECK15-NEXT: br label [[OMP_PRECOND_END]]
// CHECK15: omp.precond.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK15-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK15-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
// CHECK15-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
// CHECK15-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
// CHECK15-NEXT: [[_TMP24:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK15-NEXT: store i32 10, i32* [[M]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK15-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK15-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK15: omp_offload.failed:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK15: omp_offload.cont:
// CHECK15-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK15-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK15-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK15-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK15: omp_offload.failed5:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK15: omp_offload.cont6:
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK15-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK15-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK15-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
// CHECK15-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK15-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
// CHECK15-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP25]], align 4
// CHECK15-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK15-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
// CHECK15-NEXT: store i32 [[TMP20]], i32* [[TMP27]], align 4
// CHECK15-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK15-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK15-NEXT: store i32 [[TMP20]], i32* [[TMP29]], align 4
// CHECK15-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP30]], align 4
// CHECK15-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK15-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK15-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK15-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK15: omp_offload.failed11:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK15: omp_offload.cont12:
// CHECK15-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK15-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK15-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
// CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK15-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK15-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK15: omp_offload.failed17:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK15: omp_offload.cont18:
// CHECK15-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK15-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK15-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK15-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK15-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK15-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK15-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
// CHECK15-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
// CHECK15-NEXT: store i8* null, i8** [[TMP51]], align 4
// CHECK15-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
// CHECK15-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK15-NEXT: store i32 [[TMP46]], i32* [[TMP53]], align 4
// CHECK15-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
// CHECK15-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
// CHECK15-NEXT: store i32 [[TMP46]], i32* [[TMP55]], align 4
// CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
// CHECK15-NEXT: store i8* null, i8** [[TMP56]], align 4
// CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK15-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK15-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK15-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK15-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
// CHECK15: omp_offload.failed25:
// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT26]]
// CHECK15: omp_offload.cont26:
// CHECK15-NEXT: ret i32 0
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK15-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK15-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK15: omp.dispatch.cond:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK15: omp.dispatch.body:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK15-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK15-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK15: omp.dispatch.inc:
// CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK15-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK15-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK15: omp.dispatch.end:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK15-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK15: omp.dispatch.cond:
// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK15: omp.dispatch.body:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK15-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK15: omp.dispatch.inc:
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK15: omp.dispatch.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK15-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK15-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK15-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK15: cond.true:
// CHECK15-NEXT: br label [[COND_END:%.*]]
// CHECK15: cond.false:
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: br label [[COND_END]]
// CHECK15: cond.end:
// CHECK15-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK15-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK15-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK15: omp.loop.exit:
// CHECK15-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK15-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK15-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK15-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK15-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK15-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK15-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK15: omp.dispatch.cond:
// CHECK15-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK15: omp.dispatch.body:
// CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK15-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK15: omp.inner.for.cond:
// CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK15: omp.inner.for.body:
// CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
// CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK15: omp.body.continue:
// CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK15: omp.inner.for.inc:
// CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK15-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK15: omp.inner.for.end:
// CHECK15-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK15: omp.dispatch.inc:
// CHECK15-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK15: omp.dispatch.end:
// CHECK15-NEXT: ret void
//
//
// CHECK15-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK15-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK15-NEXT: entry:
// CHECK15-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK15-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@main
// CHECK16-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4
// CHECK16-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK16-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
// CHECK16-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
// CHECK16-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
// CHECK16-NEXT: [[_TMP37:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
// CHECK16-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK16-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
// CHECK16-NEXT: store i32 100, i32* [[N]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK16-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK16-NEXT: store i32 10, i32* [[M]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK16-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
// CHECK16-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
// CHECK16-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
// CHECK16-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK16-NEXT: store i64 4, i64* [[TMP10]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK16-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK16-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK16-NEXT: store i64 4, i64* [[TMP16]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK16-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK16-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4
// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK16-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4
// CHECK16-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK16-NEXT: store i8* null, i8** [[TMP23]], align 4
// CHECK16-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK16-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK16-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK16-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK16: omp_offload.failed:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK16: omp_offload.cont:
// CHECK16-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
// CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
// CHECK16-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK16-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK16-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4
// CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK16-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4
// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK16-NEXT: store i64 4, i64* [[TMP41]], align 4
// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP42]], align 4
// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4
// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4
// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
// CHECK16-NEXT: store i64 4, i64* [[TMP47]], align 4
// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP48]], align 4
// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
// CHECK16-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4
// CHECK16-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
// CHECK16-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4
// CHECK16-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
// CHECK16-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4
// CHECK16-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
// CHECK16-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK16-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
// CHECK16-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
// CHECK16-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
// CHECK16-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK16-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
// CHECK16-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
// CHECK16-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
// CHECK16-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
// CHECK16: omp_offload.failed15:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT16]]
// CHECK16: omp_offload.cont16:
// CHECK16-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4
// CHECK16-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK16-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
// CHECK16-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
// CHECK16-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK16-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK16-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
// CHECK16-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK16-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
// CHECK16-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4
// CHECK16-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK16-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
// CHECK16-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4
// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK16-NEXT: store i64 4, i64* [[TMP75]], align 4
// CHECK16-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP76]], align 4
// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
// CHECK16-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4
// CHECK16-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
// CHECK16-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4
// CHECK16-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
// CHECK16-NEXT: store i64 4, i64* [[TMP81]], align 4
// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP82]], align 4
// CHECK16-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
// CHECK16-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4
// CHECK16-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
// CHECK16-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4
// CHECK16-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
// CHECK16-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4
// CHECK16-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
// CHECK16-NEXT: store i8* null, i8** [[TMP88]], align 4
// CHECK16-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
// CHECK16-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK16-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4
// CHECK16-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
// CHECK16-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
// CHECK16-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4
// CHECK16-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
// CHECK16-NEXT: store i64 4, i64* [[TMP93]], align 4
// CHECK16-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
// CHECK16-NEXT: store i8* null, i8** [[TMP94]], align 4
// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK16-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK16-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK16-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK16-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK16-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
// CHECK16-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
// CHECK16-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
// CHECK16-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK16-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK16-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
// CHECK16-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
// CHECK16-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
// CHECK16-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
// CHECK16: omp_offload.failed30:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT31]]
// CHECK16: omp_offload.cont31:
// CHECK16-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
// CHECK16-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
// CHECK16-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK16-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
// CHECK16-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK16-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
// CHECK16-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4
// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK16-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
// CHECK16-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4
// CHECK16-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK16-NEXT: store i64 4, i64* [[TMP112]], align 4
// CHECK16-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP113]], align 4
// CHECK16-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
// CHECK16-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4
// CHECK16-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
// CHECK16-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4
// CHECK16-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
// CHECK16-NEXT: store i64 4, i64* [[TMP118]], align 4
// CHECK16-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP119]], align 4
// CHECK16-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
// CHECK16-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4
// CHECK16-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
// CHECK16-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4
// CHECK16-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
// CHECK16-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4
// CHECK16-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
// CHECK16-NEXT: store i8* null, i8** [[TMP125]], align 4
// CHECK16-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK16-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK16-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK16-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK16-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
// CHECK16-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
// CHECK16-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
// CHECK16-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK16-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK16-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
// CHECK16-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
// CHECK16-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
// CHECK16-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
// CHECK16: omp_offload.failed44:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT45]]
// CHECK16: omp_offload.cont45:
// CHECK16-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4
// CHECK16-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK16-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
// CHECK16-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
// CHECK16-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK16-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK16-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK16-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK16-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK16-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4
// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK16-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4
// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK16-NEXT: store i64 4, i64* [[TMP146]], align 4
// CHECK16-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP147]], align 4
// CHECK16-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
// CHECK16-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4
// CHECK16-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
// CHECK16-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4
// CHECK16-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
// CHECK16-NEXT: store i64 4, i64* [[TMP152]], align 4
// CHECK16-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP153]], align 4
// CHECK16-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
// CHECK16-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4
// CHECK16-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
// CHECK16-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4
// CHECK16-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
// CHECK16-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4
// CHECK16-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
// CHECK16-NEXT: store i8* null, i8** [[TMP159]], align 4
// CHECK16-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
// CHECK16-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
// CHECK16-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4
// CHECK16-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
// CHECK16-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
// CHECK16-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4
// CHECK16-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
// CHECK16-NEXT: store i64 4, i64* [[TMP164]], align 4
// CHECK16-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
// CHECK16-NEXT: store i8* null, i8** [[TMP165]], align 4
// CHECK16-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK16-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK16-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK16-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4
// CHECK16-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK16-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK16-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
// CHECK16-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
// CHECK16-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
// CHECK16-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK16-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK16-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
// CHECK16-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
// CHECK16-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
// CHECK16-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
// CHECK16: omp_offload.failed60:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT61]]
// CHECK16: omp_offload.cont61:
// CHECK16-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK16-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
// CHECK16-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK16-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP176]])
// CHECK16-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK16-NEXT: ret i32 [[TMP177]]
//
//
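// NOTE: Host fallback entry point for the region at line 139. It is a thin
// stub: it spills the by-value arguments, re-captures n into N_CASTED, and
// hands (n, vla, a) to @__kmpc_fork_teams, which runs @.omp_outlined. once
// per team.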
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK16-NEXT: ret void
//
//
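// NOTE: @.omp_outlined. is the `distribute` level. It recomputes the largest
// induction-variable value as (n - 0)/1 - 1, guards the whole loop nest with
// the precondition 0 < n, and calls @__kmpc_for_static_init_4 with schedule
// kind 92 (distribute static, no chunk) so each team receives one contiguous
// block of iterations; the returned UB is then clamped to the global last
// iteration. For illustration: with n == 100 and 4 teams, team t would get
// [25*t, 25*t + 24]. Each block is forwarded as the .previous.lb./.previous.ub.
// arguments of @__kmpc_fork_call, which runs the inner `parallel for`.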
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK16-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
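// NOTE: @.omp_outlined..1 is the inner `parallel for`. It rebuilds the same
// trip count, overwrites its LB/UB with the chunk inherited from the
// distribute level, and calls @__kmpc_for_static_init_4 with schedule kind
// 34 (static). The body materializes i = 0 + IV*1 and stores 0 into a[i],
// matching the canonical `a[i] = 0;` loop body.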
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK16-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
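// NOTE: The region at line 143 lowers to the same IR shape as line 139
// (same kernel signature, same schedule kinds 92/34), consistent with a
// dist_schedule(static) clause without a chunk, which is equivalent to the
// default distribute schedule.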
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK16-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK16-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
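// NOTE: The region at line 147 differs from line 139 only in the captured
// chunk expression: the host passes it as a fourth kernel argument, the
// kernel forwards it through @__kmpc_fork_teams, and @.omp_outlined..5 feeds
// it as the chunk parameter of @__kmpc_for_static_init_4 with schedule kind
// 91 (distribute static chunked), so each team cycles through chunk-sized
// blocks of iterations instead of receiving one contiguous block.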
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK16-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK16-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP18]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK16-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK16-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK16-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
// CHECK16-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK16: cond.true11:
// CHECK16-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: br label [[COND_END13:%.*]]
// CHECK16: cond.false12:
// CHECK16-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END13]]
// CHECK16: cond.end13:
// CHECK16-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
// CHECK16-NEXT: store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK16-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I4]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[I4]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK16-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK16-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK16-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 35, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK16: omp.dispatch.cond:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK16-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK16-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK16: omp.dispatch.body:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK16-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK16-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK16-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK16: omp.dispatch.inc:
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK16: omp.dispatch.end:
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK16-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK16-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK16-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK16-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 0, i32* [[I]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK16: omp.precond.then:
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK16-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK16: omp.dispatch.cond:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK16-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK16-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK16-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK16: omp.dispatch.body:
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK16-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK16-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK16-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK16: omp.dispatch.inc:
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK16: omp.dispatch.end:
// CHECK16-NEXT: br label [[OMP_PRECOND_END]]
// CHECK16: omp.precond.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK16-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK16-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
// CHECK16-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
// CHECK16-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
// CHECK16-NEXT: [[_TMP24:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK16-NEXT: store i32 10, i32* [[M]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK16-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK16-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK16: omp_offload.failed:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK16: omp_offload.cont:
// CHECK16-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK16-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK16-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
// CHECK16-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK16-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK16: omp_offload.failed5:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK16: omp_offload.cont6:
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK16-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK16-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK16-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
// CHECK16-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK16-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
// CHECK16-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP25]], align 4
// CHECK16-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK16-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
// CHECK16-NEXT: store i32 [[TMP20]], i32* [[TMP27]], align 4
// CHECK16-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK16-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK16-NEXT: store i32 [[TMP20]], i32* [[TMP29]], align 4
// CHECK16-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP30]], align 4
// CHECK16-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK16-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK16-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK16-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK16: omp_offload.failed11:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK16: omp_offload.cont12:
// CHECK16-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK16-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK16-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
// CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK16-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK16-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK16: omp_offload.failed17:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK16: omp_offload.cont18:
// CHECK16-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK16-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK16-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK16-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK16-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK16-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK16-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
// CHECK16-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
// CHECK16-NEXT: store i8* null, i8** [[TMP51]], align 4
// CHECK16-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
// CHECK16-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK16-NEXT: store i32 [[TMP46]], i32* [[TMP53]], align 4
// CHECK16-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
// CHECK16-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
// CHECK16-NEXT: store i32 [[TMP46]], i32* [[TMP55]], align 4
// CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
// CHECK16-NEXT: store i8* null, i8** [[TMP56]], align 4
// CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK16-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK16-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK16-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK16-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
// CHECK16: omp_offload.failed25:
// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT26]]
// CHECK16: omp_offload.cont26:
// CHECK16-NEXT: ret i32 0
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK16-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK16-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK16: omp.dispatch.cond:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK16: omp.dispatch.body:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK16-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK16-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK16: omp.dispatch.inc:
// CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK16-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK16-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK16: omp.dispatch.end:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK16-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK16: omp.dispatch.cond:
// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK16-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK16-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK16: omp.dispatch.body:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
// CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !22
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK16-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK16: omp.dispatch.inc:
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK16: omp.dispatch.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK16-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK16-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK16-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK16: cond.true:
// CHECK16-NEXT: br label [[COND_END:%.*]]
// CHECK16: cond.false:
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: br label [[COND_END]]
// CHECK16: cond.end:
// CHECK16-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK16-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK16-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK16: omp.loop.exit:
// CHECK16-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK16-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK16-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK16-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK16-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK16-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK16-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK16: omp.dispatch.cond:
// CHECK16-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK16-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK16-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK16: omp.dispatch.body:
// CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK16-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK16: omp.inner.for.cond:
// CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK16: omp.inner.for.body:
// CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
// CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK16: omp.body.continue:
// CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK16: omp.inner.for.inc:
// CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK16-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK16: omp.inner.for.end:
// CHECK16-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK16: omp.dispatch.inc:
// CHECK16-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK16: omp.dispatch.end:
// CHECK16-NEXT: ret void
//
//
// CHECK16-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK16-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK16-NEXT: entry:
// CHECK16-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK16-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@main
// CHECK17-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK17-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
// CHECK17-NEXT: [[_TMP9:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
// CHECK17-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
// CHECK17-NEXT: [[_TMP41:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
// CHECK17-NEXT: [[_TMP59:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK17-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK17-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK17-NEXT: store i32 100, i32* [[N]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK17-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK17-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK17-NEXT: store i32 10, i32* [[M]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
// CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP7]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
// CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP10]], align 8
// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8
// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK17-NEXT: store i64 8, i64* [[TMP16]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP17]], align 8
// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8
// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK17-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK17-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK17-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
// CHECK17-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4
// CHECK17-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
// CHECK17-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8
// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK17-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8
// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP40]], align 8
// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP41]], align 8
// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8
// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8
// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
// CHECK17-NEXT: store i64 8, i64* [[TMP46]], align 8
// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP47]], align 8
// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
// CHECK17-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8
// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
// CHECK17-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8
// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
// CHECK17-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8
// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP53]], align 8
// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK17-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK17-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
// CHECK17-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
// CHECK17-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
// CHECK17-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK17-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
// CHECK17-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
// CHECK17-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
// CHECK17-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK17: omp_offload.failed16:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK17: omp_offload.cont17:
// CHECK17-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4
// CHECK17-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK17-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
// CHECK17-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4
// CHECK17-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
// CHECK17-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK17-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4
// CHECK17-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
// CHECK17-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8
// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK17-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8
// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP73]], align 8
// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP74]], align 8
// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK17-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8
// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK17-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8
// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
// CHECK17-NEXT: store i64 8, i64* [[TMP79]], align 8
// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP80]], align 8
// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
// CHECK17-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8
// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
// CHECK17-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8
// CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
// CHECK17-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8
// CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP86]], align 8
// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
// CHECK17-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8
// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
// CHECK17-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8
// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
// CHECK17-NEXT: store i64 4, i64* [[TMP91]], align 8
// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP92]], align 8
// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK17-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK17-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK17-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
// CHECK17-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
// CHECK17-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
// CHECK17-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK17-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK17-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
// CHECK17-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
// CHECK17-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
// CHECK17-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
// CHECK17: omp_offload.failed33:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT34]]
// CHECK17: omp_offload.cont34:
// CHECK17-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
// CHECK17-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4
// CHECK17-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
// CHECK17-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK17-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8
// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8
// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP109]], align 8
// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP110]], align 8
// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
// CHECK17-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8
// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8
// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
// CHECK17-NEXT: store i64 8, i64* [[TMP115]], align 8
// CHECK17-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP116]], align 8
// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
// CHECK17-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8
// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8
// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
// CHECK17-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8
// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP122]], align 8
// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK17-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK17-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK17-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK17-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
// CHECK17-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
// CHECK17-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK17-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK17-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
// CHECK17-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
// CHECK17-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
// CHECK17-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
// CHECK17: omp_offload.failed48:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT49]]
// CHECK17: omp_offload.cont49:
// CHECK17-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4
// CHECK17-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK17-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
// CHECK17-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4
// CHECK17-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
// CHECK17-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK17-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
// CHECK17-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4
// CHECK17-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
// CHECK17-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK17-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8
// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8
// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP142]], align 8
// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP143]], align 8
// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
// CHECK17-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8
// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
// CHECK17-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8
// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
// CHECK17-NEXT: store i64 8, i64* [[TMP148]], align 8
// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP149]], align 8
// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
// CHECK17-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8
// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
// CHECK17-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8
// CHECK17-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
// CHECK17-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8
// CHECK17-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP155]], align 8
// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
// CHECK17-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
// CHECK17-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8
// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
// CHECK17-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
// CHECK17-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8
// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
// CHECK17-NEXT: store i64 4, i64* [[TMP160]], align 8
// CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP161]], align 8
// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK17-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK17-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4
// CHECK17-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK17-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK17-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
// CHECK17-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
// CHECK17-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
// CHECK17-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK17-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK17-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
// CHECK17-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
// CHECK17-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
// CHECK17-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
// CHECK17: omp_offload.failed66:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT67]]
// CHECK17: omp_offload.cont67:
// CHECK17-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK17-NEXT: [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
// CHECK17-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK17-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP172]])
// CHECK17-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK17-NEXT: ret i32 [[TMP173]]
//
//
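// Offload stub for the l139 target region: unpacks the forwarded n, VLA
// extent, and array-pointer captures, re-boxes n into an i64, and forks
// the teams region via __kmpc_fork_teams.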
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK17-NEXT: ret void
//
//
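// Teams (distribute) body for l139: recomputes the trip count from n,
// runs the distribute loop with the static distribute schedule (kind 92),
// clamps the combined upper bound, and forks the parallel worksharing
// body for each chunk via __kmpc_fork_call.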
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
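// Parallel (worksharing) body for l139: truncates the inherited 64-bit
// distribute bounds to i32, runs the static for schedule (kind 34), and
// zeroes a[i] in the loop body.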
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK17-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK17-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
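// Offload stub for the l143 target region; same shape as the l139 stub.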
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK17-NEXT: ret void
//
//
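// Teams (distribute) body for l143, mirroring .omp_outlined. above.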
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
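// Parallel (worksharing) body for l143, mirroring .omp_outlined..1 above.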
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK17-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK17-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
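// Offload stub for the l147 target region: in addition to n, the VLA
// extent, and the array, it forwards the captured schedule chunk
// (.capture_expr.) as a fourth i64 argument to the teams fork.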
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK17-NEXT: ret void
//
//
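// Teams (distribute) body for l147: the forwarded capture is loaded back
// to i32 and passed as the chunk argument of __kmpc_for_static_init_4
// with the chunked distribute schedule (kind 91).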
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK17-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP20]], i32* [[CONV8]], align 4
// CHECK17-NEXT: [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4
// CHECK17-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK17-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK17-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK17-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
// CHECK17-NEXT: br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
// CHECK17: cond.true14:
// CHECK17-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: br label [[COND_END16:%.*]]
// CHECK17: cond.false15:
// CHECK17-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END16]]
// CHECK17: cond.end16:
// CHECK17-NEXT: [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
// CHECK17-NEXT: store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
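// .omp_outlined..6 is the parallel-for body nested under ..5: it truncates the
// distributed bounds (.previous.lb./.previous.ub.) from i64 to i32, runs a
// static worksharing loop (__kmpc_for_static_init_4 with schedule kind 34,
// plain static), and stores 0 to a[i] for every iteration it owns.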
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK17-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK17-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I7]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[I7]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK17-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK17-NEXT: ret void
//
//
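// For the target region at main:151 the distribute loop below uses schedule
// kind 92 (distribute static, no chunk), so each team receives one contiguous
// block of iterations; only n, the VLA bound, and a are captured.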
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
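// .omp_outlined..9 drives its loop through the dynamic-dispatch runtime rather
// than a static init: __kmpc_dispatch_init_4 with schedule 1073741859
// (0x40000023, dynamic chunked plus the non-monotonic modifier bit) and chunk
// 1, followed by a __kmpc_dispatch_next_4 polling loop. This is the lowering
// one would expect from a schedule(dynamic) clause on the inner parallel for.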
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK17-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK17-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !15
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !15
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK17-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
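// The target region at main:155 combines an unchunked distribute (..11 below)
// with a chunked dynamic dispatch (..12 below); the extra i64 capture carries
// the dispatch chunk into the region.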
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK17-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV8]], align 4
// CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4
// CHECK17-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
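// .omp_outlined..12 matches ..9 except that the dispatch chunk is no longer
// the constant 1: it is reloaded from the forwarded capture (CONV1), which is
// consistent with a schedule(dynamic, chunk) clause whose chunk expression is
// captured before entering the region.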
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 0, i32* [[I]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK17-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK17-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
// CHECK17-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK17-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !18
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !18
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK17-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
//
//
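// Host side of the instantiation tmain<int, 10>: five target invocations
// (tmain:112 through tmain:128), each building its .offload_baseptrs /
// .offload_ptrs / .offload_mappers arrays, pushing a trip count of 10 via
// __kmpc_push_target_tripcount_mapper, and falling back to the host stub
// (omp_offload.failed*) when __tgt_target_teams_mapper returns nonzero.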
// CHECK17-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK17-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK17-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
// CHECK17-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK17-NEXT: store i32 10, i32* [[M]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK17-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK17-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK17-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK17: omp_offload.failed5:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK17: omp_offload.cont6:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK17-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK17-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK17-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK17-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK17-NEXT: store i64 [[TMP20]], i64* [[TMP27]], align 8
// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK17-NEXT: store i64 [[TMP20]], i64* [[TMP29]], align 8
// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK17-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK17: omp_offload.failed11:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK17: omp_offload.cont12:
// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK17-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK17-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK17: omp_offload.failed17:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK17: omp_offload.cont18:
// CHECK17-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK17-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK17-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK17-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
// CHECK17-NEXT: store i32 [[TMP45]], i32* [[CONV21]], align 4
// CHECK17-NEXT: [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK17-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK17-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK17-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK17-NEXT: store i64 [[TMP46]], i64* [[TMP53]], align 8
// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK17-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK17-NEXT: store i64 [[TMP46]], i64* [[TMP55]], align 8
// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP56]], align 8
// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK17-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK17-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK17-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
// CHECK17: omp_offload.failed26:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT27]]
// CHECK17: omp_offload.cont27:
// CHECK17-NEXT: ret i32 0
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK17-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK17-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK17-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK17-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK17-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK17-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK17-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK17-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK17-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK17-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK17-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK17-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK17-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK17-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !24
// CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK17-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK17-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK17-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK17-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@main
// CHECK18-SAME: (i32 signext [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK18-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK18-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8
// CHECK18-NEXT: [[_TMP9:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8
// CHECK18-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8
// CHECK18-NEXT: [[_TMP41:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8
// CHECK18-NEXT: [[_TMP59:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK18-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK18-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK18-NEXT: store i32 100, i32* [[N]], align 4
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK18-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK18-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK18-NEXT: store i32 10, i32* [[M]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64*
// CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP7]], align 8
// CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64*
// CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8
// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP10]], align 8
// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8
// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8
// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK18-NEXT: store i64 8, i64* [[TMP16]], align 8
// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP17]], align 8
// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8
// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8
// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8
// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK18-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK18-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK18-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32*
// CHECK18-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4
// CHECK18-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8
// CHECK18-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64*
// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8
// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK18-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64*
// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8
// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP40]], align 8
// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP41]], align 8
// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8
// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8
// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
// CHECK18-NEXT: store i64 8, i64* [[TMP46]], align 8
// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP47]], align 8
// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
// CHECK18-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8
// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
// CHECK18-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8
// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
// CHECK18-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8
// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP53]], align 8
// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
// CHECK18-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK18-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
// CHECK18-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
// CHECK18-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
// CHECK18-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK18-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
// CHECK18-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
// CHECK18-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
// CHECK18-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
// CHECK18-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
// CHECK18: omp_offload.failed16:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT17]]
// CHECK18: omp_offload.cont17:
// CHECK18-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4
// CHECK18-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK18-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
// CHECK18-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4
// CHECK18-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
// CHECK18-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK18-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4
// CHECK18-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
// CHECK18-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8
// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK18-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8
// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP73]], align 8
// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP74]], align 8
// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK18-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8
// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK18-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8
// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
// CHECK18-NEXT: store i64 8, i64* [[TMP79]], align 8
// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP80]], align 8
// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2
// CHECK18-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8
// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2
// CHECK18-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8
// CHECK18-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2
// CHECK18-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8
// CHECK18-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP86]], align 8
// CHECK18-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3
// CHECK18-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8
// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3
// CHECK18-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8
// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3
// CHECK18-NEXT: store i64 4, i64* [[TMP91]], align 8
// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3
// CHECK18-NEXT: store i8* null, i8** [[TMP92]], align 8
// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
// CHECK18-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK18-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4
// CHECK18-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0
// CHECK18-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1
// CHECK18-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1
// CHECK18-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK18-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4
// CHECK18-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1
// CHECK18-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]])
// CHECK18-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0
// CHECK18-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]]
// CHECK18: omp_offload.failed33:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT34]]
// CHECK18: omp_offload.cont34:
// CHECK18-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32*
// CHECK18-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4
// CHECK18-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8
// CHECK18-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK18-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64*
// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8
// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64*
// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8
// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP109]], align 8
// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP110]], align 8
// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1
// CHECK18-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8
// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1
// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8
// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1
// CHECK18-NEXT: store i64 8, i64* [[TMP115]], align 8
// CHECK18-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP116]], align 8
// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2
// CHECK18-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8
// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2
// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8
// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2
// CHECK18-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8
// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP122]], align 8
// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0
// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0
// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0
// CHECK18-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK18-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
// CHECK18-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0
// CHECK18-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1
// CHECK18-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1
// CHECK18-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK18-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4
// CHECK18-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1
// CHECK18-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]])
// CHECK18-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0
// CHECK18-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
// CHECK18: omp_offload.failed48:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT49]]
// CHECK18: omp_offload.cont49:
// CHECK18-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4
// CHECK18-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK18-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
// CHECK18-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4
// CHECK18-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
// CHECK18-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4
// CHECK18-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32*
// CHECK18-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4
// CHECK18-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8
// CHECK18-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4
// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK18-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8
// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64*
// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8
// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP142]], align 8
// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP143]], align 8
// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1
// CHECK18-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8
// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1
// CHECK18-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64*
// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8
// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1
// CHECK18-NEXT: store i64 8, i64* [[TMP148]], align 8
// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP149]], align 8
// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2
// CHECK18-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8
// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2
// CHECK18-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32**
// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8
// CHECK18-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2
// CHECK18-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8
// CHECK18-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP155]], align 8
// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3
// CHECK18-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64*
// CHECK18-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8
// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3
// CHECK18-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64*
// CHECK18-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8
// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3
// CHECK18-NEXT: store i64 4, i64* [[TMP160]], align 8
// CHECK18-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3
// CHECK18-NEXT: store i8* null, i8** [[TMP161]], align 8
// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0
// CHECK18-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0
// CHECK18-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0
// CHECK18-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4
// CHECK18-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK18-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4
// CHECK18-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0
// CHECK18-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
// CHECK18-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1
// CHECK18-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK18-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4
// CHECK18-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1
// CHECK18-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]])
// CHECK18-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0
// CHECK18-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]]
// CHECK18: omp_offload.failed66:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT67]]
// CHECK18: omp_offload.cont67:
// CHECK18-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK18-NEXT: [[CALL:%.*]] = call signext i32 @_Z5tmainIiLi10EEiT_(i32 signext [[TMP171]])
// CHECK18-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK18-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP172]])
// CHECK18-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK18-NEXT: ret i32 [[TMP173]]
//
//
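// Target entry stub for the l139 region: it re-boxes the captured n into
// N_CASTED and hands n, the VLA extent, and the array pointer to
// __kmpc_fork_teams, which invokes the teams-level outlined function below.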
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK18-NEXT: ret void
//
//
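// Teams-level (distribute) outlined function for l139. The schedule id 92
// passed to __kmpc_for_static_init_4 selects the distribute static schedule
// (kmp_distribute_static in the runtime's enumeration); each team then
// forks the parallel-for worker with its chunk bounds widened to i64.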
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK18-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK18-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
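// Parallel-for worker for l139: the team's distribute bounds arrive as
// .previous.lb./.previous.ub., are truncated back to i32, and drive a plain
// static worksharing loop (schedule id 34) whose body stores 0 to a[i].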
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK18-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK18-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK18-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK18-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
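// The l143 region is structurally identical to l139 on the host side; only
// the outlined callees (.omp_outlined..2 and ..3) differ.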
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK18-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK18-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK18-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK18-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK18-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[I5]], align 4
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK18-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK18-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK18-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK18-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
// CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP20]], i32* [[CONV8]], align 4
// CHECK18-NEXT: [[TMP21:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4
// CHECK18-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK18-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK18-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK18-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP30]], [[TMP31]]
// CHECK18-NEXT: br i1 [[CMP13]], label [[COND_TRUE14:%.*]], label [[COND_FALSE15:%.*]]
// CHECK18: cond.true14:
// CHECK18-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: br label [[COND_END16:%.*]]
// CHECK18: cond.false15:
// CHECK18-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END16]]
// CHECK18: cond.end16:
// CHECK18-NEXT: [[COND17:%.*]] = phi i32 [ [[TMP32]], [[COND_TRUE14]] ], [ [[TMP33]], [[COND_FALSE15]] ]
// CHECK18-NEXT: store i32 [[COND17]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP34]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP36]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK18-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK18-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK18-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I7]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[I7]], align 4
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK18-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK18-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK18-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4
// CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK18-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK18-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK18: omp.dispatch.cond:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK18-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK18-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK18: omp.dispatch.body:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15
// CHECK18-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK18-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I5]], align 4, !llvm.access.group !15
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !15
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK18-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK18-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK18: omp.dispatch.inc:
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK18: omp.dispatch.end:
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK18-SAME: (i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV2]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK18-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK18-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV8:%.*]] = bitcast i64* [[N_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV8]], align 4
// CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4
// CHECK18-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP26]])
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], i64 [[N:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
// CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
// CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK18-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK18-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 0, i32* [[I]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK18: omp.precond.then:
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV6:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK18-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV6]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK18-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK18: omp.dispatch.cond:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK18-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK18-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK18-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK18: omp.dispatch.body:
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18
// CHECK18-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK18-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I7]], align 4, !llvm.access.group !18
// CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !18
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK18-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK18-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK18: omp.dispatch.inc:
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK18: omp.dispatch.end:
// CHECK18-NEXT: br label [[OMP_PRECOND_END]]
// CHECK18: omp.precond.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK18-SAME: (i32 signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK18-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
// CHECK18-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK18-NEXT: store i32 10, i32* [[M]], align 4
// CHECK18-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK18-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK18-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8
// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8
// CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK18-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK18: omp_offload.failed5:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK18: omp_offload.cont6:
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK18-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK18-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 8
// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK18-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 8
// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK18-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK18-NEXT: store i64 [[TMP20]], i64* [[TMP27]], align 8
// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK18-NEXT: store i64 [[TMP20]], i64* [[TMP29]], align 8
// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK18-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK18: omp_offload.failed11:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i64 [[TMP20]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK18: omp_offload.cont12:
// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK18-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 8
// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 8
// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK18-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK18: omp_offload.failed17:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK18: omp_offload.cont18:
// CHECK18-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK18-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK18-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK18-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED20]] to i32*
// CHECK18-NEXT: store i32 [[TMP45]], i32* [[CONV21]], align 4
// CHECK18-NEXT: [[TMP46:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED20]], align 8
// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK18-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 8
// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK18-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 8
// CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
// CHECK18-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK18-NEXT: store i64 [[TMP46]], i64* [[TMP53]], align 8
// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
// CHECK18-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK18-NEXT: store i64 [[TMP46]], i64* [[TMP55]], align 8
// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP56]], align 8
// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
// CHECK18-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK18-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK18-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK18-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]]
// CHECK18: omp_offload.failed26:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i64 [[TMP46]]) #[[ATTR3]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT27]]
// CHECK18: omp_offload.cont27:
// CHECK18-NEXT: ret i32 0
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK18-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK18-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK18-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK18-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK18-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK18-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK18-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK18: omp.dispatch.cond:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV3]]
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ [[CONV4]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK18-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK18: omp.dispatch.body:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK18-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK18-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK18: omp.dispatch.inc:
// CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK18-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK18-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK18: omp.dispatch.end:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK18-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK18-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK18: omp.dispatch.cond:
// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK18-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK18-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK18: omp.dispatch.body:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21
// CHECK18-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK18-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK18-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK18: omp.dispatch.inc:
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK18: omp.dispatch.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK18-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK18: cond.true:
// CHECK18-NEXT: br label [[COND_END:%.*]]
// CHECK18: cond.false:
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: br label [[COND_END]]
// CHECK18: cond.end:
// CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]])
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK18: omp.loop.exit:
// CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK18-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK18-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK18-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK18-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK18-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK18: omp.dispatch.cond:
// CHECK18-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK18-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK18-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK18: omp.dispatch.body:
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK18-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK18: omp.inner.for.cond:
// CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24
// CHECK18-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK18: omp.inner.for.body:
// CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24
// CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !24
// CHECK18-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK18-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24
// CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK18: omp.body.continue:
// CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK18: omp.inner.for.inc:
// CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK18-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24
// CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
// CHECK18: omp.inner.for.end:
// CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK18: omp.dispatch.inc:
// CHECK18-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK18: omp.dispatch.end:
// CHECK18-NEXT: ret void
//
//
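// .omp_offloading.requires_reg is the module constructor that forwards the
// flags from any 'omp requires' clauses to the runtime; the argument of 1
// presumably encodes "no requires clauses", since this test declares none.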
// CHECK18-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK18-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK18-NEXT: ret void
//
//
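// The CHECK19 prefix exercises the same source on what is evidently a 32-bit
// target: pointers are stored with align 4 and the VLA bound stays i32, while
// the offload size arrays remain i64 as the mapper interface expects. The
// main checks verify that each of the five target regions (lines 139-155)
// builds its own baseptr/ptr/size/mapper arrays, pushes the trip count with
// __kmpc_push_target_tripcount_mapper, calls __tgt_target_teams_mapper, and
// falls back to the host entry point if the offload fails.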
// CHECK19-LABEL: define {{[^@]+}}@main
// CHECK19-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4
// CHECK19-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
// CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
// CHECK19-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
// CHECK19-NEXT: [[_TMP37:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
// CHECK19-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK19-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK19-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
// CHECK19-NEXT: store i32 100, i32* [[N]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK19-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK19-NEXT: store i32 10, i32* [[M]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK19-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
// CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: store i64 4, i64* [[TMP10]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK19-NEXT: store i64 4, i64* [[TMP16]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK19-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4
// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP23]], align 4
// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK19-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK19-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK19-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
// CHECK19-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
// CHECK19-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
// CHECK19-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK19-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4
// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK19-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4
// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK19-NEXT: store i64 4, i64* [[TMP41]], align 4
// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP42]], align 4
// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4
// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4
// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
// CHECK19-NEXT: store i64 4, i64* [[TMP47]], align 4
// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP48]], align 4
// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4
// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4
// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
// CHECK19-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4
// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
// CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
// CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
// CHECK19-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
// CHECK19-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
// CHECK19-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
// CHECK19-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
// CHECK19: omp_offload.failed15:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT16]]
// CHECK19: omp_offload.cont16:
// CHECK19-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4
// CHECK19-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK19-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
// CHECK19-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
// CHECK19-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK19-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK19-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK19-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
// CHECK19-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4
// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
// CHECK19-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4
// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK19-NEXT: store i64 4, i64* [[TMP75]], align 4
// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP76]], align 4
// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
// CHECK19-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4
// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
// CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4
// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
// CHECK19-NEXT: store i64 4, i64* [[TMP81]], align 4
// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP82]], align 4
// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
// CHECK19-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4
// CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
// CHECK19-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4
// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
// CHECK19-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4
// CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP88]], align 4
// CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
// CHECK19-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK19-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4
// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
// CHECK19-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
// CHECK19-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4
// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
// CHECK19-NEXT: store i64 4, i64* [[TMP93]], align 4
// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
// CHECK19-NEXT: store i8* null, i8** [[TMP94]], align 4
// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK19-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK19-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
// CHECK19-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
// CHECK19-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
// CHECK19-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK19-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK19-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
// CHECK19-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
// CHECK19-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
// CHECK19-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
// CHECK19: omp_offload.failed30:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT31]]
// CHECK19: omp_offload.cont31:
// CHECK19-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
// CHECK19-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
// CHECK19-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK19-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK19-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
// CHECK19-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4
// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK19-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
// CHECK19-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4
// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK19-NEXT: store i64 4, i64* [[TMP112]], align 4
// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP113]], align 4
// CHECK19-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
// CHECK19-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4
// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
// CHECK19-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4
// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
// CHECK19-NEXT: store i64 4, i64* [[TMP118]], align 4
// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP119]], align 4
// CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
// CHECK19-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4
// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
// CHECK19-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4
// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
// CHECK19-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4
// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP125]], align 4
// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK19-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK19-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK19-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
// CHECK19-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
// CHECK19-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
// CHECK19-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK19-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK19-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
// CHECK19-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
// CHECK19-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
// CHECK19-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
// CHECK19: omp_offload.failed44:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT45]]
// CHECK19: omp_offload.cont45:
// CHECK19-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4
// CHECK19-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK19-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
// CHECK19-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
// CHECK19-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK19-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK19-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK19-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK19-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK19-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4
// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK19-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4
// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK19-NEXT: store i64 4, i64* [[TMP146]], align 4
// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP147]], align 4
// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4
// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
// CHECK19-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4
// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
// CHECK19-NEXT: store i64 4, i64* [[TMP152]], align 4
// CHECK19-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP153]], align 4
// CHECK19-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
// CHECK19-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4
// CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
// CHECK19-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4
// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
// CHECK19-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4
// CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
// CHECK19-NEXT: store i8* null, i8** [[TMP159]], align 4
// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
// CHECK19-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
// CHECK19-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4
// CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
// CHECK19-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
// CHECK19-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4
// CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
// CHECK19-NEXT: store i64 4, i64* [[TMP164]], align 4
// CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
// CHECK19-NEXT: store i8* null, i8** [[TMP165]], align 4
// CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK19-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK19-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK19-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4
// CHECK19-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK19-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK19-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
// CHECK19-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
// CHECK19-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
// CHECK19-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK19-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK19-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
// CHECK19-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
// CHECK19-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
// CHECK19-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
// CHECK19: omp_offload.failed60:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT61]]
// CHECK19: omp_offload.cont61:
// CHECK19-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK19-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
// CHECK19-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK19-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP176]])
// CHECK19-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK19-NEXT: ret i32 [[TMP177]]
//
//
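// Each __omp_offloading_..._main_lNNN function is the host entry for the
// corresponding target region: it re-materializes the captured values (n,
// the VLA bound, and the array pointer) and forks the teams outlined
// function. A rough sketch of the resulting call chain, assuming the usual
// lowering (names illustrative, not the generated symbols):
//
//   offload entry (lNNN)
//     -> __kmpc_fork_teams(.omp_outlined.)        // distribute level
//          -> __kmpc_fork_call(.omp_outlined..1)  // parallel-for level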
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK19-NEXT: ret void
//
//
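// .omp_outlined. is the teams/distribute level. It computes the trip count
// from n (the omp.precond guard skips the runtime calls when the loop is
// empty), claims a block of team iterations via __kmpc_for_static_init_4 with
// schedule type 92 (the runtime's unchunked distribute-static value, as far
// as I can tell), and hands the resulting bounds down to the parallel
// outlined function.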
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
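// .omp_outlined..1 is the parallel-for level: it receives the distribute
// chunk through .previous.lb./.previous.ub., re-derives per-thread bounds
// with __kmpc_for_static_init_4 (schedule type 34, plain static), clamps the
// upper bound against the global last iteration, and runs the user body,
// i.e. the a[i] = 0 store checked below. Conceptually (a sketch, not the
// generated code):
//
//   __kmpc_for_static_init_4(loc, tid, /*sched=*/34, &last, &lb, &ub, &str, 1, 1);
//   for (int iv = lb; iv <= min(ub, last_iter); ++iv)
//     a[0 + iv * 1] = 0;  // matches the mul/add pattern in the checks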
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK19-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
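// The remaining CHECK19 functions (_main_l143, .omp_outlined..2/..3, and so
// on) repeat the same teams -> distribute -> parallel-for structure for the
// other target regions; the checks differ mainly in the outlined-function
// suffixes and, for the regions at lines 147 and 155, in the fourth map
// entry carrying the captured value of m down to the kernel.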
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK19-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
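// Note: offload entry for the target directive at source line 147. It forwards n, the VLA bound, a,
// and a captured expression into the teams region via __kmpc_fork_teams; judging by its use as the
// chunk argument of __kmpc_for_static_init_4 below, that capture appears to be a dist_schedule chunk.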
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK19-NEXT: ret void
//
//
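// Note: @.omp_outlined..5 is the 'teams distribute' loop for the l147 region: schedule kind 91
// (kmp_distribute_static_chunked) with the runtime chunk [[TMP6]], so omp.inner.for.inc re-derives
// [[DOTOMP_COMB_LB]]/[[DOTOMP_COMB_UB]] by the stride and re-clamps the upper bound each round trip.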
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK19-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP18]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK19-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK19-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK19-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
// CHECK19-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK19: cond.true11:
// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: br label [[COND_END13:%.*]]
// CHECK19: cond.false12:
// CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END13]]
// CHECK19: cond.end13:
// CHECK19-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
// CHECK19-NEXT: store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
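// Note: @.omp_outlined..6 mirrors @.omp_outlined..3 (static worksharing over the distributed chunk,
// schedule kind 34); the only difference is the extra captured expression threaded through from the
// chunked distribute above.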
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I4]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[I4]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK19-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
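// Note: offload entry for the target directive at source line 151. No schedule chunk is captured
// here, so only n, the VLA bound, and a are forked into the teams region.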
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK19-NEXT: ret void
//
//
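// Note: @.omp_outlined..8 is the unchunked 'distribute' loop for l151: __kmpc_for_static_init_4 with
// schedule kind 92 (kmp_distribute_static) and chunk 1, so each team gets one contiguous block and
// omp.inner.for.inc is a plain stride bump, with no lower/upper-bound recomputation.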
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
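// Note: @.omp_outlined..9 is the inner loop for l151 lowered through the dynamic-dispatch path:
// __kmpc_dispatch_init_4 with 1073741859 (0x40000023, i.e. kmp_sch_dynamic_chunked = 35 plus the
// non-monotonic modifier bit 0x40000000), polled by __kmpc_dispatch_next_4 in omp.dispatch.cond —
// consistent with a schedule(dynamic) clause. Body accesses carry !llvm.access.group metadata tied
// to [[LOOP17]].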
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK19-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16
// CHECK19-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !16
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !16
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK19-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
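// Note: offload entry for the target directive at source line 155. Like l147 it forwards a captured
// expression, but here it ends up as the chunk argument of __kmpc_dispatch_init_4 in
// @.omp_outlined..12 below rather than as a dist_schedule chunk.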
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK19-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK19-NEXT: ret void
//
//
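// Note: @.omp_outlined..11 repeats the unchunked distribute pattern of @.omp_outlined..8 (schedule
// kind 92, chunk 1) while additionally forwarding the captured chunk expression to the inner
// outlined function.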
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
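// Note: @.omp_outlined..12 is the dynamically dispatched inner loop for l155. It matches
// @.omp_outlined..9 except that the chunk passed to __kmpc_dispatch_init_4 is the runtime value
// [[TMP8]] loaded from [[DOTCAPTURE_EXPR__ADDR]], as expected for schedule(dynamic, chunk).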
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK19-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 0, i32* [[I]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK19: omp.precond.then:
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK19-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !19
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !19
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK19-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: br label [[OMP_PRECOND_END]]
// CHECK19: omp.precond.end:
// CHECK19-NEXT: ret void
//
//
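// Note: host-side checks for the tmain<int, 10> instantiation follow. For each target region the
// offload base-pointer/pointer/mapper arrays are populated, the constant trip count 10 is pushed via
// __kmpc_push_target_tripcount_mapper, and __tgt_target_teams_mapper is invoked, with a direct host
// fallback call to the outlined region when the launch returns non-zero.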
// CHECK19-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK19-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK19-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
// CHECK19-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
// CHECK19-NEXT: [[_TMP24:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK19-NEXT: store i32 10, i32* [[M]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK19-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
// CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK19-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK19-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK19: omp_offload.failed5:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK19: omp_offload.cont6:
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK19-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK19-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK19-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK19-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP25]], align 4
// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK19-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
// CHECK19-NEXT: store i32 [[TMP20]], i32* [[TMP27]], align 4
// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK19-NEXT: store i32 [[TMP20]], i32* [[TMP29]], align 4
// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP30]], align 4
// CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK19-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK19: omp_offload.failed11:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK19: omp_offload.cont12:
// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK19-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK19: omp_offload.failed17:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK19: omp_offload.cont18:
// CHECK19-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK19-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK19-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK19-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK19-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK19-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
// CHECK19-NEXT: store i8* null, i8** [[TMP51]], align 4
// CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
// CHECK19-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK19-NEXT: store i32 [[TMP46]], i32* [[TMP53]], align 4
// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
// CHECK19-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
// CHECK19-NEXT: store i32 [[TMP46]], i32* [[TMP55]], align 4
// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
// CHECK19-NEXT: store i8* null, i8** [[TMP56]], align 4
// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK19-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK19-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK19-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
// CHECK19: omp_offload.failed25:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT26]]
// CHECK19: omp_offload.cont26:
// CHECK19-NEXT: ret i32 0
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK19-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK19-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK19-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK19-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK19-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK19-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
// CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !22
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK19-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK19-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK19-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK19-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
// CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !25
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
// CHECK19-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK19-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK19-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK19-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK19-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@main
// CHECK20-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4
// CHECK20-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK20-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
// CHECK20-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
// CHECK20-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4
// CHECK20-NEXT: [[_TMP37:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4
// CHECK20-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK20-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK20-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4
// CHECK20-NEXT: store i32 100, i32* [[N]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK20-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK20-NEXT: store i32 10, i32* [[M]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK20-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
// CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32*
// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT: store i64 4, i64* [[TMP10]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK20-NEXT: store i64 4, i64* [[TMP16]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK20-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4
// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP23]], align 4
// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1
// CHECK20-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]])
// CHECK20-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK20-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
// CHECK20-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4
// CHECK20-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4
// CHECK20-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK20-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64
// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4
// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK20-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4
// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK20-NEXT: store i64 4, i64* [[TMP41]], align 4
// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP42]], align 4
// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4
// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4
// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1
// CHECK20-NEXT: store i64 4, i64* [[TMP47]], align 4
// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP48]], align 4
// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4
// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4
// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
// CHECK20-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4
// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
// CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4
// CHECK20-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0
// CHECK20-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
// CHECK20-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
// CHECK20-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
// CHECK20-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1
// CHECK20-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]])
// CHECK20-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0
// CHECK20-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]]
// CHECK20: omp_offload.failed15:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT16]]
// CHECK20: omp_offload.cont16:
// CHECK20-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4
// CHECK20-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK20-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4
// CHECK20-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4
// CHECK20-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4
// CHECK20-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK20-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64
// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK20-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32*
// CHECK20-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4
// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32*
// CHECK20-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4
// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK20-NEXT: store i64 4, i64* [[TMP75]], align 4
// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP76]], align 4
// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
// CHECK20-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4
// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
// CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4
// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1
// CHECK20-NEXT: store i64 4, i64* [[TMP81]], align 4
// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP82]], align 4
// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
// CHECK20-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4
// CHECK20-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
// CHECK20-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4
// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2
// CHECK20-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4
// CHECK20-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP88]], align 4
// CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
// CHECK20-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK20-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4
// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
// CHECK20-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32*
// CHECK20-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4
// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
// CHECK20-NEXT: store i64 4, i64* [[TMP93]], align 4
// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
// CHECK20-NEXT: store i8* null, i8** [[TMP94]], align 4
// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
// CHECK20-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK20-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
// CHECK20-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0
// CHECK20-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
// CHECK20-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
// CHECK20-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK20-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
// CHECK20-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1
// CHECK20-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]])
// CHECK20-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0
// CHECK20-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
// CHECK20: omp_offload.failed30:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT31]]
// CHECK20: omp_offload.cont31:
// CHECK20-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4
// CHECK20-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4
// CHECK20-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK20-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64
// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK20-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32*
// CHECK20-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4
// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK20-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32*
// CHECK20-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4
// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK20-NEXT: store i64 4, i64* [[TMP112]], align 4
// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP113]], align 4
// CHECK20-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1
// CHECK20-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4
// CHECK20-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1
// CHECK20-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4
// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1
// CHECK20-NEXT: store i64 4, i64* [[TMP118]], align 4
// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP119]], align 4
// CHECK20-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2
// CHECK20-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4
// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2
// CHECK20-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4
// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2
// CHECK20-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4
// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP125]], align 4
// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0
// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0
// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0
// CHECK20-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK20-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4
// CHECK20-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0
// CHECK20-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1
// CHECK20-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1
// CHECK20-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK20-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4
// CHECK20-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1
// CHECK20-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]])
// CHECK20-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
// CHECK20-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]]
// CHECK20: omp_offload.failed44:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT45]]
// CHECK20: omp_offload.cont45:
// CHECK20-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4
// CHECK20-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK20-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4
// CHECK20-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4
// CHECK20-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4
// CHECK20-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK20-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4
// CHECK20-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK20-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64
// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK20-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32*
// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4
// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK20-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32*
// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4
// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK20-NEXT: store i64 4, i64* [[TMP146]], align 4
// CHECK20-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP147]], align 4
// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1
// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4
// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1
// CHECK20-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4
// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1
// CHECK20-NEXT: store i64 4, i64* [[TMP152]], align 4
// CHECK20-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP153]], align 4
// CHECK20-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2
// CHECK20-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4
// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2
// CHECK20-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4
// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2
// CHECK20-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4
// CHECK20-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2
// CHECK20-NEXT: store i8* null, i8** [[TMP159]], align 4
// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3
// CHECK20-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32*
// CHECK20-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4
// CHECK20-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3
// CHECK20-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32*
// CHECK20-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4
// CHECK20-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3
// CHECK20-NEXT: store i64 4, i64* [[TMP164]], align 4
// CHECK20-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3
// CHECK20-NEXT: store i8* null, i8** [[TMP165]], align 4
// CHECK20-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0
// CHECK20-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0
// CHECK20-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0
// CHECK20-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4
// CHECK20-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK20-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4
// CHECK20-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0
// CHECK20-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1
// CHECK20-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1
// CHECK20-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK20-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4
// CHECK20-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1
// CHECK20-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]])
// CHECK20-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0
// CHECK20-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]]
// CHECK20: omp_offload.failed60:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT61]]
// CHECK20: omp_offload.cont61:
// CHECK20-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK20-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiLi10EEiT_(i32 [[TMP175]])
// CHECK20-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK20-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP176]])
// CHECK20-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK20-NEXT: ret i32 [[TMP177]]
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139
// CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK20-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143
// CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[I3]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK20-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
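// Editorial note (not autogenerated): the offloading entry checked next is
// named "<target-id>_main_l147", i.e. after the host function and the source
// line of its target construct. The entry itself does no loop work: it
// re-packs each by-value capture through a *_CASTED alloca (n and the
// captured chunk expression), then launches the team region with
// __kmpc_fork_teams, passing the VLA bound, the array pointer, and those
// re-packed scalars to @.omp_outlined..5.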
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
// CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP6]])
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK20-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP14]], [[ADD]]
// CHECK20-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP18]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK20-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK20-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK20-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP28]], [[TMP29]]
// CHECK20-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK20: cond.true11:
// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: br label [[COND_END13:%.*]]
// CHECK20: cond.false12:
// CHECK20-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END13]]
// CHECK20: cond.end13:
// CHECK20-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE11]] ], [ [[TMP31]], [[COND_FALSE12]] ]
// CHECK20-NEXT: store i32 [[COND14]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP32]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP34]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
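// Editorial note (not autogenerated): in @.omp_outlined..5 above, schedule
// constant 91 passed to __kmpc_for_static_init_4 is
// kmp_distribute_static_chunked, and its chunk operand [[TMP6]] comes from
// the captured expression, so this is the "distribute" half of the construct
// with an explicit dist_schedule chunk; each chunk's bounds are forwarded to
// @.omp_outlined..6 via __kmpc_fork_call. A plausible source shape (a sketch
// only; the actual test source is outside this excerpt):
//
//   #pragma omp target teams distribute parallel for dist_schedule(static, ch)
//   for (int i = 0; i < n; ++i)
//     a[i] = 0;   // matches the "store i32 0" into [[ARRAYIDX]] checked above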
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP9]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
// CHECK20-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I4]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[I4]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP18]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK20-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
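// Editorial note (not autogenerated): @.omp_outlined..6 is the "parallel for"
// half. It recomputes the global trip count, then overwrites its lower/upper
// bounds with the [[DOTPREVIOUS_LB_]]/[[DOTPREVIOUS_UB_]] arguments received
// from the enclosing distribute chunk before calling __kmpc_for_static_init_4
// with schedule 34 (kmp_sch_static), so the threads of a team divide only
// that team's slice of the iteration space.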
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
// CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
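// Editorial note (not autogenerated): @.omp_outlined..8 uses schedule
// constant 92 (kmp_distribute_static, the unchunked form, for which the
// trailing chunk argument of 1 is a placeholder): every team gets a single
// contiguous block of iterations, whose bounds are again forwarded to the
// inner parallel region @.omp_outlined..9.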
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK20-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP11]], i32 1073741859, i32 [[TMP8]], i32 [[TMP9]], i32 1, i32 1)
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20: omp.dispatch.cond:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP13]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK20-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK20-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20: omp.dispatch.body:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16
// CHECK20-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK20-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !16
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !16
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP19]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK20-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK20-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20: omp.dispatch.inc:
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK20: omp.dispatch.end:
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
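// Editorial note (not autogenerated): @.omp_outlined..9 above switches to the
// dynamic-dispatch runtime. The literal 1073741859 is kmp_sch_dynamic_chunked
// (35) with the nonmonotonic modifier bit (1 << 30) set, the encoding used
// for schedule(dynamic), and the chunk argument is the default 1. The checked
// control flow is, in rough pseudocode (a sketch, not part of the test):
//
//   __kmpc_dispatch_init_4(loc, tid, 1073741859, lb, ub, /*stride=*/1, 1);
//   while (__kmpc_dispatch_next_4(loc, tid, &last, &lb, &ub, &stride))
//     for (iv = lb; iv <= ub; ++iv)   // omp.inner.for.*, !llvm.access.group
//       a[iv] = 0;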
//
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK20-SAME: (i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK20-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
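// Editorial note (not autogenerated): @.omp_outlined..11 is the same static
// distribute shape as @.omp_outlined..8 (schedule constant 92); the captured
// schedule chunk is not consumed here but merely re-packed and forwarded to
// @.omp_outlined..12, i.e. the dynamic schedule applies only to the inner
// parallel for, not to the distribute level.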
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], i32 [[N:%.*]], i32 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK20-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK20-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 0, i32* [[I]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK20: omp.precond.then:
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK20-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 [[TMP8]])
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20: omp.dispatch.cond:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK20-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK20-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK20-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20: omp.dispatch.body:
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19
// CHECK20-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
// CHECK20-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !19
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !19
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 [[TMP20]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK20-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20: omp.dispatch.inc:
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK20: omp.dispatch.end:
// CHECK20-NEXT: br label [[OMP_PRECOND_END]]
// CHECK20: omp.precond.end:
// CHECK20-NEXT: ret void
//
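// Editorial note (not autogenerated): @.omp_outlined..12 differs from
// @.omp_outlined..9 only in its chunk operand: [[TMP8]] is loaded from the
// captured expression and passed as the final argument of
// __kmpc_dispatch_init_4, consistent with an explicit chunk such as
// schedule(dynamic, m) rather than the default chunk of 1.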
//
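// Editorial note (not autogenerated): _Z5tmainIiLi10EEiT_ demangles to
// "int tmain<int, 10>(int)". Its host-side checks that follow verify the
// standard offload launch pattern: each mapped operand gets matching slots in
// the .offload_baseptrs/.offload_ptrs/.offload_mappers arrays, the trip count
// is published with __kmpc_push_target_tripcount_mapper, and
// __tgt_target_teams_mapper launches the region; a nonzero return takes the
// omp_offload.failed* branch, which runs the host-fallback clone. Roughly (a
// pseudocode sketch of the checked pattern):
//
//   baseptrs[0] = ptrs[0] = &a; mappers[0] = 0;
//   __kmpc_push_target_tripcount_mapper(loc, /*device=*/-1, /*tripcount=*/10);
//   if (__tgt_target_teams_mapper(loc, -1, region_id, /*nargs=*/1, baseptrs,
//                                 ptrs, sizes, maptypes, /*names=*/0,
//                                 /*mappers=*/0, /*teams=*/0, /*threads=*/0))
//     host_fallback_clone(&a);
//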
// CHECK20-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK20-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK20-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 4
// CHECK20-NEXT: [[_TMP16:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED20:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4
// CHECK20-NEXT: [[_TMP24:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK20-NEXT: store i32 10, i32* [[M]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK20-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
// CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK20-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP13]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK20-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK20: omp_offload.failed5:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK20: omp_offload.cont6:
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[M]], align 4
// CHECK20-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK20-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP22]], align 4
// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK20-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP24]], align 4
// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP25]], align 4
// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1
// CHECK20-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
// CHECK20-NEXT: store i32 [[TMP20]], i32* [[TMP27]], align 4
// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1
// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK20-NEXT: store i32 [[TMP20]], i32* [[TMP29]], align 4
// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP30]], align 4
// CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0
// CHECK20-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
// CHECK20: omp_offload.failed11:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120([10 x i32]* [[A]], i32 [[TMP20]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT12]]
// CHECK20: omp_offload.cont12:
// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP36]], align 4
// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP38]], align 4
// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
// CHECK20-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK20: omp_offload.failed17:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124([10 x i32]* [[A]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK20: omp_offload.cont18:
// CHECK20-NEXT: [[TMP44:%.*]] = load i32, i32* [[M]], align 4
// CHECK20-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK20-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK20-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK20-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED20]], align 4
// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK20-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP48]], align 4
// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to [10 x i32]**
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP50]], align 4
// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0
// CHECK20-NEXT: store i8* null, i8** [[TMP51]], align 4
// CHECK20-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
// CHECK20-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK20-NEXT: store i32 [[TMP46]], i32* [[TMP53]], align 4
// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
// CHECK20-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32*
// CHECK20-NEXT: store i32 [[TMP46]], i32* [[TMP55]], align 4
// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1
// CHECK20-NEXT: store i8* null, i8** [[TMP56]], align 4
// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK20-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10)
// CHECK20-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
// CHECK20-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK20-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
// CHECK20: omp_offload.failed25:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128([10 x i32]* [[A]], i32 [[TMP46]]) #[[ATTR3]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT26]]
// CHECK20: omp_offload.cont26:
// CHECK20-NEXT: ret i32 0
//
//
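// The checks above cover the tail of the host wrapper: for each target region it
// fills the .offload_baseptrs/.offload_ptrs/.offload_mappers buffers, pushes the
// trip count (10) via __kmpc_push_target_tripcount_mapper, calls
// __tgt_target_teams_mapper, and on failure falls back to a direct host call of
// the region function. The blocks below check those region functions and their
// outlined bodies for the tmain<int, 10> instantiation.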
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112
// CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK20-NEXT: ret void
//
//
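// .omp_outlined..14 is the teams body for the l112 region: a distribute loop over
// [0, 9] set up by __kmpc_for_static_init_4 with schedule kind 92 (distribute-static
// in the runtime's sched encoding) that forks .omp_outlined..15 once per [lb, ub]
// chunk via __kmpc_fork_call.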
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
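// .omp_outlined..15 is the nested parallel-for worker: it adopts the inherited
// [previous_lb, previous_ub] bounds, runs a plain static schedule (kind 34), and
// stores 0 into a[i] on each iteration.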
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK20-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK20-NEXT: ret void
//
//
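// The l116 region repeats the l112 pattern with fresh outlined functions
// (.omp_outlined..17 / .omp_outlined..18) and identical loop structure.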
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116
// CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK20-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
// CHECK20-NEXT: ret void
//
//
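// The l120 region additionally forwards the captured chunk expression: the teams
// body (.omp_outlined..21) passes it down to .omp_outlined..22, whose
// __kmpc_for_static_init_4 call uses schedule kind 33 (static, chunked) with the
// captured value as the chunk argument, so the worker walks chunks through an
// omp.dispatch loop instead of taking a single static slice.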
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120
// CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..21
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20: omp.dispatch.cond:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20: omp.dispatch.body:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK20-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP16]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK20-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20: omp.dispatch.inc:
// CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK20-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK20-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK20: omp.dispatch.end:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK20-NEXT: ret void
//
//
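// The l124 region's worker (.omp_outlined..26) switches to the dynamic dispatch
// API: __kmpc_dispatch_init_4 with schedule constant 1073741859 (dynamic/chunked
// plus the non-monotonic modifier bit, per the runtime's sched encoding) and a
// __kmpc_dispatch_next_4 polling loop; the body's loads and stores carry
// !llvm.access.group metadata tied to the !llvm.loop annotation.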
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124
// CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..25
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK20-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20: omp.dispatch.cond:
// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK20-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK20-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20: omp.dispatch.body:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22
// CHECK20-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !22
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP12]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK20-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK20-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20: omp.dispatch.inc:
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK20: omp.dispatch.end:
// CHECK20-NEXT: ret void
//
//
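// The l128 region repeats the dynamic-dispatch pattern of l124 but threads the
// captured chunk size into __kmpc_dispatch_init_4 in place of the constant 1.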
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128
// CHECK20-SAME: ([10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK20-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20: cond.true:
// CHECK20-NEXT: br label [[COND_END:%.*]]
// CHECK20: cond.false:
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: br label [[COND_END]]
// CHECK20: cond.end:
// CHECK20-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK20-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]])
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20: omp.loop.exit:
// CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK20-NEXT: ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[DOTPREVIOUS_LB_:%.*]], i32 [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[A:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK20-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK20-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20: omp.dispatch.cond:
// CHECK20-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK20-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK20-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20: omp.dispatch.body:
// CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20: omp.inner.for.cond:
// CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25
// CHECK20-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK20-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20: omp.inner.for.body:
// CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK20-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK20-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25
// CHECK20-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !25
// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP13]]
// CHECK20-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25
// CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20: omp.body.continue:
// CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20: omp.inner.for.inc:
// CHECK20-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK20-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK20-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25
// CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
// CHECK20: omp.inner.for.end:
// CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20: omp.dispatch.inc:
// CHECK20-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK20: omp.dispatch.end:
// CHECK20-NEXT: ret void
//
//
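// Finally, the module constructor registers the translation unit's 'requires'
// flags with the offload runtime via __tgt_register_requires.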
// CHECK20-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK20-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK20-NEXT: entry:
// CHECK20-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK20-NEXT: ret void
//