// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
#ifdef CK1
///==========================================================================///
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
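// CK1: 'parallel master' around a call that may throw. The CHECK1/CHECK2 lines below
// verify that the outlined function carries a personality, guards the invoke with
// __kmpc_master/__kmpc_end_master, and routes an unwinding exception to __clang_call_terminate.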
void foo() { extern void mayThrow(); mayThrow(); }
void parallel_master() {
#pragma omp parallel master
  foo();
}
#endif
#ifdef CK2
///==========================================================================///
// RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
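// CK2: private(a) on 'parallel master'; the outlined function gets its own alloca for 'a'
// and no capture is forwarded through __kmpc_fork_call (see CHECK5/CHECK6).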
void parallel_master_private() {
  int a;
#pragma omp parallel master private(a)
  a++;
}
#endif
#ifdef CK3
///==========================================================================///
// RUN: %clang_cc1 -DCK3 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -DCK3 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK3 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
// RUN: %clang_cc1 -DCK3 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK3 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK3 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
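// CK3: default(shared); 'a' is passed by reference to the outlined function and
// incremented inside the master region (see CHECK9/CHECK10).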
void parallel_master_private() {
  int a;
#pragma omp parallel master default(shared)
  a++;
}
#endif
#ifdef CK31
///==========================================================================///
// RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
// RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
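// CK31: OpenMP 5.1 default(firstprivate); the scalar 'a' is captured by value and passed
// through __kmpc_fork_call as an i64 (see CHECK13/CHECK14).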
void parallel_master_default_firstprivate() {
  int a;
#pragma omp parallel master default(firstprivate)
  a++;
}
#endif
#ifdef CK32
///==========================================================================///
// RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK17
// RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK18
// RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
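// CK32: default(firstprivate) with a struct, a function-local static, and a static data
// member. The CHECK17/CHECK18 lines show the St object passed by pointer, the local
// static 'y' captured by value as an i64, and St::y updated through its global.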
struct St {
  int a, b;
  static int y;
  St() : a(0), b(0) {}
  ~St() {}
};
int St::y = 0;
void parallel_master_default_firstprivate() {
  St a = St();
  static int y = 0;
#pragma omp parallel master default(firstprivate)
  {
    a.a += 1;
    a.b += 1;
    y++;
    a.y++;
  }
}
#endif
#ifdef CK4
///==========================================================================///
// RUN: %clang_cc1 -DCK4 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK21
// RUN: %clang_cc1 -DCK4 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK4 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK22
// RUN: %clang_cc1 -DCK4 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK4 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK4 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
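// CK4: explicit firstprivate(a); the scalar is passed by value as an i64 through
// __kmpc_fork_call (see CHECK21/CHECK22).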
void parallel_master_firstprivate() {
  int a;
#pragma omp parallel master firstprivate(a)
  a++;
}
#endif
#ifdef CK5
///==========================================================================///
// RUN: %clang_cc1 -DCK5 -verify -fopenmp -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK25
// RUN: %clang_cc1 -DCK5 -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK5 -fopenmp -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK26
// RUN: %clang_cc1 -DCK5 -verify -fopenmp-simd -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK5 -fopenmp-simd -fnoopenmp-use-tls -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK5 -fopenmp-simd -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK5 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK29
// RUN: %clang_cc1 -DCK5 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK5 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
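// CK5: copyin(a) on a threadprivate variable, with and without TLS. The non-TLS runs
// (CHECK25/CHECK26) copy the master's value through __kmpc_threadprivate_cached, while
// the TLS run (CHECK29) copies directly through @a; both synchronize with __kmpc_barrier
// before entering the master region.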
int a;
#pragma omp threadprivate(a)
void parallel_master_copyin() {
#pragma omp parallel master copyin(a)
  a++;
}
// TLC-CHECK-DAG: [[INC:%.+]] = add nsw i32 [[TEN]], 1
// TLC-CHECK-DAG: store i32 [[INC]], i32* [[TEN]]
#endif
#ifdef CK6
///==========================================================================///
// RUN: %clang_cc1 -DCK6 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK6 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
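// CK6: reduction(+:g) on 'parallel master'. Only PCH generation is run here, so no IR is
// checked for this case.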
void parallel_master_reduction() {
  int g;
#pragma omp parallel master reduction(+:g)
  g = 1;
}
// switch
// case 1:
// case 2:
#endif
#ifdef CK7
///==========================================================================///
// RUN: %clang_cc1 -DCK7 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK7 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
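// CK7: if(parallel: false) requests a serialized parallel region. Only PCH generation is
// run here, so no IR is checked for this case.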
void parallel_master_if() {
#pragma omp parallel master if (parallel: false)
  parallel_master_if();
}
#endif
#ifdef CK8
///==========================================================================///
// RUN: %clang_cc1 -DCK8 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK8 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
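// CK8: proc_bind(master/spread/close) on 'parallel master', both in a template and in
// main. Only PCH generation is run here, so no IR is checked for this case.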
typedef __INTPTR_TYPE__ intptr_t;
void foo();
struct S {
  intptr_t a, b, c;
  S(intptr_t a) : a(a) {}
  operator char() { return a; }
  ~S() {}
};
template <typename T>
T tmain() {
#pragma omp parallel master proc_bind(master)
  foo();
  return T();
}
int main() {
#pragma omp parallel master proc_bind(spread)
  foo();
#pragma omp parallel master proc_bind(close)
  foo();
  return tmain<int>();
}
#endif
#ifdef CK9
///==========================================================================///
// RUN: %clang_cc1 -DCK9 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK9 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
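// CK9: firstprivate(a) combined with an allocate(myalloc:a) clause using a user-declared
// allocator handle type. Only PCH generation is run here, so no IR is checked for this case.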
typedef void **omp_allocator_handle_t;
extern const omp_allocator_handle_t omp_null_allocator;
extern const omp_allocator_handle_t omp_default_mem_alloc;
extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
extern const omp_allocator_handle_t omp_const_mem_alloc;
extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
extern const omp_allocator_handle_t omp_pteam_mem_alloc;
extern const omp_allocator_handle_t omp_thread_mem_alloc;
void parallel_master_allocate() {
  int a;
  omp_allocator_handle_t myalloc = nullptr;
#pragma omp parallel master firstprivate(a) allocate(myalloc:a)
  a++;
}
#endif
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z3foov
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @_Z8mayThrowv()
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z15parallel_masterv
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: invoke void @_Z3foov()
// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK1-NEXT: catch i8* null
// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
// CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: br label [[TERMINATE_HANDLER:%.*]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: ret void
// CHECK1: terminate.handler:
// CHECK1-NEXT: [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
// CHECK1-NEXT: call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6:[0-9]+]]
// CHECK1-NEXT: unreachable
//
//
// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
// CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
// CHECK1-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR6]]
// CHECK1-NEXT: unreachable
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3foov
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @_Z8mayThrowv()
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z15parallel_masterv
// CHECK2-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[EXN_SLOT:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: invoke void @_Z3foov()
// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 }
// CHECK2-NEXT: catch i8* null
// CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
// CHECK2-NEXT: store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
// CHECK2-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT: br label [[TERMINATE_HANDLER:%.*]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: ret void
// CHECK2: terminate.handler:
// CHECK2-NEXT: [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
// CHECK2-NEXT: call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6:[0-9]+]]
// CHECK2-NEXT: unreachable
//
//
// CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate
// CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
// CHECK2-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR4:[0-9]+]]
// CHECK2-NEXT: call void @_ZSt9terminatev() #[[ATTR6]]
// CHECK2-NEXT: unreachable
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK5-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK5-NEXT: store i32 [[INC]], i32* [[A]], align 4
// CHECK5-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
// CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK6-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK6-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK6-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK6: omp_if.then:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4
// CHECK6-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK6-NEXT: store i32 [[INC]], i32* [[A]], align 4
// CHECK6-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK6-NEXT: br label [[OMP_IF_END]]
// CHECK6: omp_if.end:
// CHECK6-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK9-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK9: omp_if.then:
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT: [[INC:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK9-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: br label [[OMP_IF_END]]
// CHECK9: omp_if.end:
// CHECK9-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
// CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK10-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK10-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK10-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK10: omp_if.then:
// CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK10-NEXT: [[INC:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK10-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK10-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK10-NEXT: br label [[OMP_IF_END]]
// CHECK10: omp_if.end:
// CHECK10-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
// CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK13-NEXT: ret void
//
//
// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK13-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK13-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK13-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK13-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK13-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK13: omp_if.then:
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK13-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK13-NEXT: store i32 [[INC]], i32* [[CONV]], align 8
// CHECK13-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK13-NEXT: br label [[OMP_IF_END]]
// CHECK13: omp_if.end:
// CHECK13-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
// CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK14-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK14-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK14-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK14-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK14: omp_if.then:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK14-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK14-NEXT: store i32 [[INC]], i32* [[CONV]], align 8
// CHECK14-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK14-NEXT: br label [[OMP_IF_END]]
// CHECK14: omp_if.end:
// CHECK14-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
// CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
// CHECK17-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]])
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[Y_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.St*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.St* [[A]], i64 [[TMP1]])
// CHECK17-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR3:[0-9]+]]
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_ZN2StC1Ev
// CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK17-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.St* nonnull align 4 dereferenceable(8) [[A:%.*]], i64 [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK17-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store %struct.St* [[A]], %struct.St** [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.St*, %struct.St** [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK17-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[TMP0]], i32 0, i32 0
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[A1]], align 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[A1]], align 4
// CHECK17-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[TMP0]], i32 0, i32 1
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[B]], align 4
// CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK17-NEXT: store i32 [[ADD2]], i32* [[B]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK17-NEXT: store i32 [[INC]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT: store i32 [[INC3]], i32* @_ZN2St1yE, align 4
// CHECK17-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_ZN2StD1Ev
// CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4:[0-9]+]] comdat align 2 {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK17-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_ZN2StC2Ev
// CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK17-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
// CHECK17-NEXT: store i32 0, i32* [[B]], align 4
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_ZN2StD2Ev
// CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK17-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
// CHECK18-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
// CHECK18-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]])
// CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[Y_CASTED]], align 8
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.St*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.St* [[A]], i64 [[TMP1]])
// CHECK18-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR3:[0-9]+]]
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_ZN2StC1Ev
// CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK18-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.St* nonnull align 4 dereferenceable(8) [[A:%.*]], i64 [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK18-NEXT: [[A_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK18-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8
// CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK18-NEXT: store %struct.St* [[A]], %struct.St** [[A_ADDR]], align 8
// CHECK18-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = load %struct.St*, %struct.St** [[A_ADDR]], align 8
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
// CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK18-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK18-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK18: omp_if.then:
// CHECK18-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[TMP0]], i32 0, i32 0
// CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[A1]], align 4
// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
// CHECK18-NEXT: store i32 [[ADD]], i32* [[A1]], align 4
// CHECK18-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[TMP0]], i32 0, i32 1
// CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[B]], align 4
// CHECK18-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK18-NEXT: store i32 [[ADD2]], i32* [[B]], align 4
// CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK18-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK18-NEXT: store i32 [[INC]], i32* [[CONV]], align 8
// CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* @_ZN2St1yE, align 4
// CHECK18-NEXT: [[INC3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK18-NEXT: store i32 [[INC3]], i32* @_ZN2St1yE, align 4
// CHECK18-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK18-NEXT: br label [[OMP_IF_END]]
// CHECK18: omp_if.end:
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_ZN2StD1Ev
// CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4:[0-9]+]] comdat align 2 {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK18-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_ZN2StC2Ev
// CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK18-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
// CHECK18-NEXT: store i32 0, i32* [[A]], align 4
// CHECK18-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
// CHECK18-NEXT: store i32 0, i32* [[B]], align 4
// CHECK18-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_ZN2StD2Ev
// CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
// CHECK18-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
// CHECK18-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@_Z28parallel_master_firstprivatev
// CHECK21-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK21-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK21-NEXT: ret void
//
//
// CHECK21-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK21-NEXT: entry:
// CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK21-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK21-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK21-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK21-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK21: omp_if.then:
// CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK21-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK21-NEXT: store i32 [[INC]], i32* [[CONV]], align 8
// CHECK21-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK21-NEXT: br label [[OMP_IF_END]]
// CHECK21: omp_if.end:
// CHECK21-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@_Z28parallel_master_firstprivatev
// CHECK22-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK22-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK22-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK22-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK22-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK22-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
// CHECK22-NEXT: ret void
//
//
// CHECK22-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK22-NEXT: entry:
// CHECK22-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK22-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK22-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK22-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK22-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK22-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK22-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK22-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK22-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK22-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK22: omp_if.then:
// CHECK22-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK22-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK22-NEXT: store i32 [[INC]], i32* [[CONV]], align 8
// CHECK22-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK22-NEXT: br label [[OMP_IF_END]]
// CHECK22: omp_if.end:
// CHECK22-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
// CHECK25-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK25-NEXT: ret void
//
//
// CHECK25-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK25-NEXT: entry:
// CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK25-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK25-NEXT: [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK25-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
// CHECK25-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
// CHECK25-NEXT: [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @a to i64), [[TMP4]]
// CHECK25-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK25: copyin.not.master:
// CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4
// CHECK25-NEXT: store i32 [[TMP6]], i32* [[TMP3]], align 4
// CHECK25-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK25: copyin.not.master.end:
// CHECK25-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK25-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK25-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK25-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK25: omp_if.then:
// CHECK25-NEXT: [[TMP9:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK25-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK25-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK25-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK25-NEXT: store i32 [[INC]], i32* [[TMP10]], align 4
// CHECK25-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK25-NEXT: br label [[OMP_IF_END]]
// CHECK25: omp_if.end:
// CHECK25-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
// CHECK26-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK26-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
// CHECK26-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
// CHECK26-NEXT: [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @a to i64), [[TMP4]]
// CHECK26-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK26: copyin.not.master:
// CHECK26-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4
// CHECK26-NEXT: store i32 [[TMP6]], i32* [[TMP3]], align 4
// CHECK26-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK26: copyin.not.master.end:
// CHECK26-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK26-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK26-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK26-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK26: omp_if.then:
// CHECK26-NEXT: [[TMP9:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
// CHECK26-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK26-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK26-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK26-NEXT: store i32 [[INC]], i32* [[TMP10]], align 4
// CHECK26-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK26-NEXT: br label [[OMP_IF_END]]
// CHECK26: omp_if.end:
// CHECK26-NEXT: ret void
//
//
// CHECK29-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
// CHECK29-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @a)
// CHECK29-NEXT: ret void
//
//
// CHECK29-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK29-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK29-NEXT: entry:
// CHECK29-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK29-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK29-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK29-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK29-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK29-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK29-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
// CHECK29-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @a to i64)
// CHECK29-NEXT: br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK29: copyin.not.master:
// CHECK29-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK29-NEXT: store i32 [[TMP3]], i32* @a, align 4
// CHECK29-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK29: copyin.not.master.end:
// CHECK29-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK29-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
// CHECK29-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK29-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK29-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]])
// CHECK29-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK29-NEXT: br i1 [[TMP9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK29: omp_if.then:
// CHECK29-NEXT: [[TMP10:%.*]] = load i32, i32* @a, align 4
// CHECK29-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK29-NEXT: store i32 [[INC]], i32* @a, align 4
// CHECK29-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]])
// CHECK29-NEXT: br label [[OMP_IF_END]]
// CHECK29: omp_if.end:
// CHECK29-NEXT: ret void
//
//
// CHECK29-LABEL: define {{[^@]+}}@_ZTW1a
// CHECK29-SAME: () #[[ATTR4:[0-9]+]] comdat {
// CHECK29-NEXT: ret i32* @a
//
//