// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
///==========================================================================///
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCK4 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -DCK4 -fopenmp -fopenmp-targets=i386-pc-linux-gnu -x c++ -std=c++11 -triple i386-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK5
// RUN: %clang_cc1 -DCK4 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK6
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK7
// RUN: %clang_cc1 -DCK4 -fopenmp -fopenmp-version=45 -fopenmp-targets=i386-pc-linux-gnu -x c++ -std=c++11 -triple i386-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK8
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -DCK4 -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK10
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -fopenmp-version=50 -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -DCK4 -fopenmp -fopenmp-version=50 -fopenmp-targets=i386-pc-linux-gnu -x c++ -std=c++11 -triple i386-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK12
// RUN: %clang_cc1 -DCK4 -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK4 -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK4 -verify -fopenmp-simd -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK4 -fopenmp-simd -fopenmp-targets=i386-pc-linux-gnu -x c++ -std=c++11 -triple i386-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-targets=i386-pc-linux-gnu -x c++ -triple i386-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK4
// Map types: OMP_MAP_PRIVATE_VAL | OMP_MAP_TARGET_PARAM | OMP_MAP_IMPLICIT = 800
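// As a quick sanity check on that constant (using the map-type flag values
// from clang's OpenMPOffloadMappingFlags at the time this test was generated;
// treat the exact numeric values as an assumption if the enum has since
// changed):
//   OMP_MAP_TARGET_PARAM = 0x20, OMP_MAP_PRIVATE_VAL = 0x100,
//   OMP_MAP_IMPLICIT = 0x200  =>  0x20 | 0x100 | 0x200 = 0x320 = 800 decimal,
// which is the value recorded in the @.offload_maptypes global referenced by
// the __tgt_target_teams_mapper calls checked below.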
void implicit_maps_nested_integer (int a){
  int i = a;

  // The captures in parallel are by reference. Only the capture in target is by
  // copy.
  #pragma omp parallel
  {
    #pragma omp target
    {
      #pragma omp parallel
      {
        ++i;
      }
    }
  }
}
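// A host-side reading of the capture rules above (an explanatory note, not
// part of the checked IR): because the scalar is captured by copy into the
// target region, it is passed firstprivate-by-value to the offloaded function
// (hence the I_CASTED widening shuffle in the outlined bodies below), so the
// ++i inside the device-side parallel updates only the device copy; the host
// 'i' is unchanged after the target construct returns.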
#endif // CK4
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK1-SAME: (i32 signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[I_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[I_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i64 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK1-SAME: (i64 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[I]], i64* [[I_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[I_ADDR]] to i32*
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK2-SAME: (i32 signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[I_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[I_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK2-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i64 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK2-SAME: (i64 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[I]], i64* [[I_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[I_ADDR]] to i32*
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK3-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[I_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[I_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK3-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i32 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK3-SAME: (i32 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[I]], i32* [[I_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[I_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK4-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[I_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[I_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK4-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i32 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK4-SAME: (i32 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[I]], i32* [[I_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[I_ADDR]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK4-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK4-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK5-SAME: (i32 signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[I_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[I_CASTED]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK5-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK5-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i64 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK5-SAME: (i64 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[I]], i64* [[I_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[I_ADDR]] to i32*
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK5-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK5-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK5-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK6-SAME: (i32 signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[I_CASTED]] to i32*
// CHECK6-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[I_CASTED]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK6-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK6-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK6-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK6-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK6-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK6: omp_offload.failed:
// CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i64 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK6: omp_offload.cont:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK6-SAME: (i64 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store i64 [[I]], i64* [[I_ADDR]], align 8
// CHECK6-NEXT: [[CONV:%.*]] = bitcast i64* [[I_ADDR]] to i32*
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK6-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK6-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK6-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK6-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK7-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK7-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[I_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[I_CASTED]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK7-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK7-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i32 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK7-SAME: (i32 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 [[I]], i32* [[I_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[I_ADDR]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK7-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK7-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK7-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK7-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK8-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[I_CASTED]], align 4
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[I_CASTED]], align 4
// CHECK8-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK8-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK8-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK8-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK8-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK8-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK8-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK8: omp_offload.failed:
// CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i32 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK8: omp_offload.cont:
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK8-SAME: (i32 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 [[I]], i32* [[I_ADDR]], align 4
// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[I_ADDR]])
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK8-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK8-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK8-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK8-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK8-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK8-NEXT: ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK8-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK8-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK9-SAME: (i32 signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK9-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[I_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[I_CASTED]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK9-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK9-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK9-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK9-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK9-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK9-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK9-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK9-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK9: omp_offload.failed:
// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i64 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK9: omp_offload.cont:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK9-SAME: (i64 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[I]], i64* [[I_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[I_ADDR]] to i32*
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK9-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK9-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK9-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK10-SAME: (i32 signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK10-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK10-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[I_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[I_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[I_CASTED]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK10-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK10-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK10-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK10-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK10-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK10-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK10-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK10-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK10-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK10-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK10: omp_offload.failed:
// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i64 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK10: omp_offload.cont:
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK10-SAME: (i64 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[I_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[I]], i64* [[I_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[I_ADDR]] to i32*
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK10-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK10-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK10-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK10-NEXT: ret void
//
//
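// CHECK11 is the 32-bit lowering of the same test function: the pointer
// allocas and offload arrays drop to align 4, and the captured i travels
// as a plain i32 (no i64 casted slot and no bitcast of the element
// pointers is needed). Apart from those width differences, the fork,
// offload-array setup, __tgt_target_teams_mapper call, and failure
// fallback match the 64-bit sequence above.
//
//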
// CHECK11-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK11-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK11-NEXT: store i32 [[TMP1]], i32* [[I_CASTED]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[I_CASTED]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK11-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK11-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK11-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK11-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK11-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK11-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK11-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK11: omp_offload.failed:
// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i32 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK11: omp_offload.cont:
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK11-SAME: (i32 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[I]], i32* [[I_ADDR]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[I_ADDR]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK11-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK11-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK11-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK11-NEXT: ret void
//
//
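// CHECK12 restates the CHECK11 expectations line for line: two RUN
// configurations that produce identical IR receive identical autogenerated
// blocks (presumably the direct and include-pch compiles of the same
// 32-bit target).
//
//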
// CHECK12-LABEL: define {{[^@]+}}@_Z28implicit_maps_nested_integeri
// CHECK12-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP0]], i32* [[I]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[I]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[I_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], i32* [[I_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[I_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK12-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK12-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK12-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK12-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK12-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK12-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK12-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK12-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK12-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK12-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK12: omp_offload.failed:
// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48(i32 [[TMP2]]) #[[ATTR3:[0-9]+]]
// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK12: omp_offload.cont:
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z28implicit_maps_nested_integeri_l48
// CHECK12-SAME: (i32 [[I:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[I_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[I]], i32* [[I_ADDR]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[I_ADDR]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK12-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK12-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK12-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK12-NEXT: ret void
//