// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
volatile int g __attribute__((aligned(128))) = 1212;
template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};
struct SS {
  int a;
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel reduction(default, +: a, b, c)
#ifdef LAMBDA
    [&]() {
      ++this->a, --b, (this)->c /= 1;
#pragma omp parallel reduction(&: a, b, c)
      ++(this)->a, --b, this->c /= 1;
    }();
#elif defined(BLOCKS)
    ^{
      ++a;
      --this->b;
      (this)->c /= 1;
#pragma omp parallel reduction(-: a, b, c)
      ++(this)->a, --b, this->c /= 1;
    }();
#else
    ++this->a, --b, c /= 1;
#endif
  }
};
template<typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel reduction(*: a)
#ifdef LAMBDA
    [&]() {
      [&]() {
        ++this->a;
#pragma omp parallel reduction(&& :a)
        ++(this)->a;
      }();
    }();
#elif defined(BLOCKS)
    ^{
      ^{
        ++a;
#pragma omp parallel reduction(|: a)
        ++(this)->a;
      }();
    }();
#else
    ++(this)->a;
#endif
  }
};
void foo_array_sect(short x[1]) {
#pragma omp parallel reduction(default, + : x[:])
  {}
}
template <typename T>
T tmain() {
  T t;
  S<T> test;
  SST<T> sst;
  T t_var __attribute__((aligned(128))) = T(), t_var1 __attribute__((aligned(128)));
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var __attribute__((aligned(128))) (3), var1 __attribute__((aligned(128)));
#pragma omp parallel reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
  return T();
}
int sivar;
int main() {
  SS ss(sivar);
#ifdef LAMBDA
  [&]() {
#pragma omp parallel reduction(+:g)
    {
      // Reduction list for runtime.
      g = 1;
      [&]() {
        g = 2;
      }();
    }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel reduction(-:g)
    {
      // Reduction list for runtime.
      g = 1;
      ^{
        g = 2;
      }();
    }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
  float _Complex cf;
#pragma omp parallel reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
  if (var1)
#pragma omp parallel reduction(+ : t_var) reduction(& : var) reduction(&& : var1) reduction(min : t_var1)
    while (1) {
      vec[0] = t_var;
      s_arr[0] = var;
    }
#pragma omp parallel reduction(+ : cf)
    ;
  return tmain<int>();
#endif
}
// Reduction list for runtime.
// For + reduction operation initial value of private variable is 0.
// For & reduction operation initial value of private variable is ones in all bits.
// For && reduction operation initial value of private variable is 1.0.
// For min reduction operation initial value of private variable is largest representable value.
// Skip checks for internal operations.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
// switch(res)
// case 1:
// t_var += t_var_reduction;
// var = var.operator &(var_reduction);
// var1 = var1.operator &&(var1_reduction);
// t_var1 = min(t_var1, t_var1_reduction);
// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// break;
// case 2:
// t_var += t_var_reduction;
// var = var.operator &(var_reduction);
// var1 = var1.operator &&(var1_reduction);
// t_var1 = min(t_var1, t_var1_reduction);
// break;
// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
// t_var_lhs = (float*)lhs[0];
// t_var_rhs = (float*)rhs[0];
// var_lhs = (S<float>*)lhs[1];
// var_rhs = (S<float>*)rhs[1];
// var1_lhs = (S<float>*)lhs[2];
// var1_rhs = (S<float>*)rhs[2];
// t_var1_lhs = (float*)lhs[3];
// t_var1_rhs = (float*)rhs[3];
// t_var_lhs += t_var_rhs;
// var_lhs = var_lhs.operator &(var_rhs);
// var1_lhs = var1_lhs.operator &&(var1_rhs);
// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// For + reduction operation initial value of private variable is 0.
// For & reduction operation initial value of private variable is ones in all bits.
// For && reduction operation initial value of private variable is 1.0.
// For min reduction operation initial value of private variable is largest representable value.
// Reduction list for runtime.
// For + reduction operation initial value of private variable is 0.
// For & reduction operation initial value of private variable is ones in all bits.
// For && reduction operation initial value of private variable is 1.0.
// For min reduction operation initial value of private variable is largest representable value.
// Skip checks for internal operations.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);
// switch(res)
// case 1:
// t_var += t_var_reduction;
// var = var.operator &(var_reduction);
// var1 = var1.operator &&(var1_reduction);
// t_var1 = min(t_var1, t_var1_reduction);
// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// break;
// case 2:
// t_var += t_var_reduction;
// var = var.operator &(var_reduction);
// var1 = var1.operator &&(var1_reduction);
// t_var1 = min(t_var1, t_var1_reduction);
// break;
// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// t_var_lhs += t_var_rhs;
// var_lhs = var_lhs.operator &(var_rhs);
// var1_lhs = var1_lhs.operator &&(var1_rhs);
// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
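//
// Taken together, the pseudocode above corresponds to an outlined parallel
// region that behaves roughly like the hand-written sketch below. This is an
// illustrative outline only, kept in comments so it does not change the
// generated IR; `outlined`, `loc`, `gtid` and `lock` are placeholders for the
// outlined function, the ident_t location, the global thread id and the
// reduction lock of the real runtime calls, whose signatures are simplified.
//
//   void outlined(float &t_var, S<float> &var, S<float> &var1, float &t_var1) {
//     float t_var_priv = 0;                // identity for +
//     S<float> var_priv, var1_priv;        // default-initialized for & and &&
//     float t_var1_priv = __FLT_MAX__;     // identity for min (largest representable value)
//     // ... the parallel region body runs on the private copies ...
//     void *RedList[] = {&t_var_priv, &var_priv, &var1_priv, &t_var1_priv};
//     switch (__kmpc_reduce_nowait(loc, gtid, 4, sizeof(RedList), RedList,
//                                  reduce_func, &lock)) {
//     case 1: // fast path: combine directly into the shared variables
//       t_var += t_var_priv;
//       var = var.operator&(var_priv);
//       var1 = var1 && var1_priv; // via S<float>'s conversion operator
//       t_var1 = t_var1 < t_var1_priv ? t_var1 : t_var1_priv;
//       __kmpc_end_reduce_nowait(loc, gtid, &lock);
//       break;
//     case 2: // contended path: the same combines, but under atomics or a critical section
//       break;
//     default: // nothing left for this thread to do
//       break;
//     }
//   }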
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z14foo_array_sectPs
// CHECK1-SAME: (i16* [[X:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i16*)* @.omp_outlined. to void (i32*, i32*, ...)*), i16* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i16* [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[_TMP13:%.*]] = alloca i16, align 2
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP0]], i64 0
// CHECK1-NEXT: [[TMP1:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[TMP1]], i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint i16* [[ARRAYIDX1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK1-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK1-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1
// CHECK1-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP8:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP6]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[VLA]], [[TMP9]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP10:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint i16* [[TMP10]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK1-NEXT: [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP14]]
// CHECK1-NEXT: store i16* [[TMP15]], i16** [[TMP]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i16* [[VLA]] to i8*
// CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP6]] to i8*
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP21]], i32 1, i64 16, i8* [[TMP22]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP23]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP24]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE7:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST2:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT5:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP25:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP25]] to i32
// CHECK1-NEXT: [[TMP26:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP26]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV3]]
// CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i16
// CHECK1-NEXT: store i16 [[CONV4]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT5]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE6:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT5]], [[TMP24]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_DONE7]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done7:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY8:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP27]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY8]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY9:%.*]]
// CHECK1: omp.arraycpy.body9:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST10:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP28:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK1-NEXT: [[CONV12:%.*]] = sext i16 [[TMP28]] to i32
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]] monotonic, align 2
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP29:%.*]] = phi i16 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY9]] ], [ [[TMP34:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i16 [[TMP29]], i16* [[_TMP13]], align 2
// CHECK1-NEXT: [[TMP30:%.*]] = load i16, i16* [[_TMP13]], align 2
// CHECK1-NEXT: [[CONV14:%.*]] = sext i16 [[TMP30]] to i32
// CHECK1-NEXT: [[TMP31:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK1-NEXT: [[CONV15:%.*]] = sext i16 [[TMP31]] to i32
// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[CONV14]], [[CONV15]]
// CHECK1-NEXT: [[CONV17:%.*]] = trunc i32 [[ADD16]] to i16
// CHECK1-NEXT: store i16 [[CONV17]], i16* [[ATOMIC_TEMP]], align 2
// CHECK1-NEXT: [[TMP32:%.*]] = load i16, i16* [[ATOMIC_TEMP]], align 2
// CHECK1-NEXT: [[TMP33:%.*]] = cmpxchg i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i16 [[TMP29]], i16 [[TMP32]] monotonic monotonic, align 2
// CHECK1-NEXT: [[TMP34]] = extractvalue { i16, i1 } [[TMP33]], 0
// CHECK1-NEXT: [[TMP35:%.*]] = extractvalue { i16, i1 } [[TMP33]], 1
// CHECK1-NEXT: br i1 [[TMP35]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP27]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY9]]
// CHECK1: omp.arraycpy.done21:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP36:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP36]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i16*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP11]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP11]], [[TMP15]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP16:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP16]] to i32
// CHECK1-NEXT: [[TMP17:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP17]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK1-NEXT: store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done4:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SS:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[CF:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN2SSC1ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[SS]], i32* nonnull align 4 dereferenceable(4) @sivar)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, float*, [2 x %struct.S]*, %struct.S*, %struct.S*, float*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], float* [[T_VAR]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]])
// CHECK1-NEXT: [[CALL:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1: if.then:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, float*, [2 x %struct.S]*, %struct.S*, %struct.S*, float*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], float* [[T_VAR]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]])
// CHECK1-NEXT: br label [[IF_END]]
// CHECK1: if.end:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { float, float }*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), { float, float }* [[CF]])
// CHECK1-NEXT: [[CALL1:%.*]] = call i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: store i32 [[CALL1]], i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR5:[0-9]+]]
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR5]]
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[IF_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done2:
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR5]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2SSC1ERi
// CHECK1-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR7:[0-9]+]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN2SSC2ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[THIS1]], i32* nonnull align 4 dereferenceable(4) [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[REF_TMP13:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[ATOMIC_TEMP23:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[_TMP24:%.*]] = alloca float, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 4, i1 false)
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast float* [[T_VAR2]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK1-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
// CHECK1-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast float* [[T_VAR15]] to i8*
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 4, i64 32, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP21:%.*]] = load float, float* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP21]], [[TMP22]]
// CHECK1-NEXT: store float [[ADD]], float* [[TMP1]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL7:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL7]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL8:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL9:%.*]] = fcmp une float [[CALL8]], 0.000000e+00
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP25:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL9]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV10:%.*]] = uitofp i1 [[TMP25]] to float
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV10]])
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK1-NEXT: [[TMP28:%.*]] = load float, float* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP28]], [[TMP29]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP30:%.*]] = load float, float* [[TMP5]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP31:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi float [ [[TMP30]], [[COND_TRUE]] ], [ [[TMP31]], [[COND_FALSE]] ]
// CHECK1-NEXT: store float [[COND]], float* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP32:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast float* [[TMP1]] to i32*
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP33]] monotonic, align 4
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP34:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP42:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast i32 [[TMP34]] to float
// CHECK1-NEXT: store float [[TMP36]], float* [[TMP]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load float, float* [[TMP]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[ADD11:%.*]] = fadd float [[TMP37]], [[TMP38]]
// CHECK1-NEXT: store float [[ADD11]], float* [[ATOMIC_TEMP]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK1-NEXT: [[TMP40:%.*]] = bitcast float* [[TMP1]] to i32*
// CHECK1-NEXT: [[TMP41:%.*]] = cmpxchg i32* [[TMP40]], i32 [[TMP34]], i32 [[TMP39]] monotonic monotonic, align 4
// CHECK1-NEXT: [[TMP42]] = extractvalue { i32, i1 } [[TMP41]], 0
// CHECK1-NEXT: [[TMP43:%.*]] = extractvalue { i32, i1 } [[TMP41]], 1
// CHECK1-NEXT: br i1 [[TMP43]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP44:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP45:%.*]] = bitcast %struct.S* [[CALL12]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP44]], i8* align 4 [[TMP45]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL14:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK1-NEXT: [[TOBOOL15:%.*]] = fcmp une float [[CALL14]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
// CHECK1: land.rhs16:
// CHECK1-NEXT: [[CALL17:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL18:%.*]] = fcmp une float [[CALL17]], 0.000000e+00
// CHECK1-NEXT: br label [[LAND_END19]]
// CHECK1: land.end19:
// CHECK1-NEXT: [[TMP46:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
// CHECK1-NEXT: [[CONV20:%.*]] = uitofp i1 [[TMP46]] to float
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP13]], float [[CONV20]])
// CHECK1-NEXT: [[TMP47:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
// CHECK1-NEXT: [[TMP48:%.*]] = bitcast %struct.S* [[REF_TMP13]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP47]], i8* align 4 [[TMP48]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR5]]
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[TMP49:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[TMP50:%.*]] = bitcast float* [[TMP5]] to i32*
// CHECK1-NEXT: [[ATOMIC_LOAD21:%.*]] = load atomic i32, i32* [[TMP50]] monotonic, align 4
// CHECK1-NEXT: br label [[ATOMIC_CONT22:%.*]]
// CHECK1: atomic_cont22:
// CHECK1-NEXT: [[TMP51:%.*]] = phi i32 [ [[ATOMIC_LOAD21]], [[LAND_END19]] ], [ [[TMP61:%.*]], [[COND_END28:%.*]] ]
// CHECK1-NEXT: [[TMP52:%.*]] = bitcast float* [[ATOMIC_TEMP23]] to i32*
// CHECK1-NEXT: [[TMP53:%.*]] = bitcast i32 [[TMP51]] to float
// CHECK1-NEXT: store float [[TMP53]], float* [[_TMP24]], align 4
// CHECK1-NEXT: [[TMP54:%.*]] = load float, float* [[_TMP24]], align 4
// CHECK1-NEXT: [[TMP55:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: [[CMP25:%.*]] = fcmp olt float [[TMP54]], [[TMP55]]
// CHECK1-NEXT: br i1 [[CMP25]], label [[COND_TRUE26:%.*]], label [[COND_FALSE27:%.*]]
// CHECK1: cond.true26:
// CHECK1-NEXT: [[TMP56:%.*]] = load float, float* [[_TMP24]], align 4
// CHECK1-NEXT: br label [[COND_END28]]
// CHECK1: cond.false27:
// CHECK1-NEXT: [[TMP57:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK1-NEXT: br label [[COND_END28]]
// CHECK1: cond.end28:
// CHECK1-NEXT: [[COND29:%.*]] = phi float [ [[TMP56]], [[COND_TRUE26]] ], [ [[TMP57]], [[COND_FALSE27]] ]
// CHECK1-NEXT: store float [[COND29]], float* [[ATOMIC_TEMP23]], align 4
// CHECK1-NEXT: [[TMP58:%.*]] = load i32, i32* [[TMP52]], align 4
// CHECK1-NEXT: [[TMP59:%.*]] = bitcast float* [[TMP5]] to i32*
// CHECK1-NEXT: [[TMP60:%.*]] = cmpxchg i32* [[TMP59]], i32 [[TMP51]], i32 [[TMP58]] monotonic monotonic, align 4
// CHECK1-NEXT: [[TMP61]] = extractvalue { i32, i1 } [[TMP60]], 0
// CHECK1-NEXT: [[TMP62:%.*]] = extractvalue { i32, i1 } [[TMP60]], 1
// CHECK1-NEXT: br i1 [[TMP62]], label [[ATOMIC_EXIT30:%.*]], label [[ATOMIC_CONT22]]
// CHECK1: atomic_exit30:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR5]]
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR5]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
// CHECK1-NEXT: [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
// CHECK1-NEXT: store float [[ADD]], float* [[TMP11]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
// CHECK1-NEXT: [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK1-NEXT: [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
// CHECK1-NEXT: store float [[COND]], float* [[TMP29]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR0]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret %struct.S* [[THIS1]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR0]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret float 0.000000e+00
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK1-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
// CHECK1-NEXT: br label [[WHILE_COND:%.*]]
// CHECK1: while.cond:
// CHECK1-NEXT: br label [[WHILE_BODY:%.*]]
// CHECK1: while.body:
// CHECK1-NEXT: [[TMP6:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 4, i1 false)
// CHECK1-NEXT: br label [[WHILE_COND]], !llvm.loop [[LOOP4:![0-9]+]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], { float, float }* nonnull align 4 dereferenceable(8) [[CF:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[CF_ADDR:%.*]] = alloca { float, float }*, align 8
// CHECK1-NEXT: [[CF1:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: [[ATOMIC_TEMP10:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca { float, float }, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store { float, float }* [[CF]], { float, float }** [[CF_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load { float, float }*, { float, float }** [[CF_ADDR]], align 8
// CHECK1-NEXT: [[CF1_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK1-NEXT: [[CF1_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK1-NEXT: store float 0.000000e+00, float* [[CF1_REALP]], align 4
// CHECK1-NEXT: store float 0.000000e+00, float* [[CF1_IMAGP]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast { float, float }* [[CF1]] to i8*
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP5]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP6]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[DOTREALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[DOTREAL:%.*]] = load float, float* [[DOTREALP]], align 4
// CHECK1-NEXT: [[DOTIMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: [[DOTIMAG:%.*]] = load float, float* [[DOTIMAGP]], align 4
// CHECK1-NEXT: [[CF1_REALP2:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK1-NEXT: [[CF1_REAL:%.*]] = load float, float* [[CF1_REALP2]], align 4
// CHECK1-NEXT: [[CF1_IMAGP3:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK1-NEXT: [[CF1_IMAG:%.*]] = load float, float* [[CF1_IMAGP3]], align 4
// CHECK1-NEXT: [[ADD_R:%.*]] = fadd float [[DOTREAL]], [[CF1_REAL]]
// CHECK1-NEXT: [[ADD_I:%.*]] = fadd float [[DOTIMAG]], [[CF1_IMAG]]
// CHECK1-NEXT: [[DOTREALP4:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[DOTIMAGP5:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store float [[ADD_R]], float* [[DOTREALP4]], align 4
// CHECK1-NEXT: store float [[ADD_I]], float* [[DOTIMAGP5]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[CF1_REALP6:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK1-NEXT: [[CF1_REAL7:%.*]] = load float, float* [[CF1_REALP6]], align 4
// CHECK1-NEXT: [[CF1_IMAGP8:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK1-NEXT: [[CF1_IMAG9:%.*]] = load float, float* [[CF1_IMAGP8]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast { float, float }* [[TMP0]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast { float, float }* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: call void @__atomic_load(i64 8, i8* [[TMP7]], i8* [[TMP8]], i32 0)
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[ATOMIC_TEMP_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP]], i32 0, i32 0
// CHECK1-NEXT: [[ATOMIC_TEMP_REAL:%.*]] = load float, float* [[ATOMIC_TEMP_REALP]], align 4
// CHECK1-NEXT: [[ATOMIC_TEMP_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP]], i32 0, i32 1
// CHECK1-NEXT: [[ATOMIC_TEMP_IMAG:%.*]] = load float, float* [[ATOMIC_TEMP_IMAGP]], align 4
// CHECK1-NEXT: [[TMP_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 0
// CHECK1-NEXT: [[TMP_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 1
// CHECK1-NEXT: store float [[ATOMIC_TEMP_REAL]], float* [[TMP_REALP]], align 4
// CHECK1-NEXT: store float [[ATOMIC_TEMP_IMAG]], float* [[TMP_IMAGP]], align 4
// CHECK1-NEXT: [[TMP_REALP11:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 0
// CHECK1-NEXT: [[TMP_REAL:%.*]] = load float, float* [[TMP_REALP11]], align 4
// CHECK1-NEXT: [[TMP_IMAGP12:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 1
// CHECK1-NEXT: [[TMP_IMAG:%.*]] = load float, float* [[TMP_IMAGP12]], align 4
// CHECK1-NEXT: [[CF1_REALP13:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK1-NEXT: [[CF1_REAL14:%.*]] = load float, float* [[CF1_REALP13]], align 4
// CHECK1-NEXT: [[CF1_IMAGP15:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK1-NEXT: [[CF1_IMAG16:%.*]] = load float, float* [[CF1_IMAGP15]], align 4
// CHECK1-NEXT: [[ADD_R17:%.*]] = fadd float [[TMP_REAL]], [[CF1_REAL14]]
// CHECK1-NEXT: [[ADD_I18:%.*]] = fadd float [[TMP_IMAG]], [[CF1_IMAG16]]
// CHECK1-NEXT: [[ATOMIC_TEMP10_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP10]], i32 0, i32 0
// CHECK1-NEXT: [[ATOMIC_TEMP10_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP10]], i32 0, i32 1
// CHECK1-NEXT: store float [[ADD_R17]], float* [[ATOMIC_TEMP10_REALP]], align 4
// CHECK1-NEXT: store float [[ADD_I18]], float* [[ATOMIC_TEMP10_IMAGP]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast { float, float }* [[TMP0]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast { float, float }* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast { float, float }* [[ATOMIC_TEMP10]] to i8*
// CHECK1-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* [[TMP9]], i8* [[TMP10]], i8* [[TMP11]], i32 0, i32 0)
// CHECK1-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to { float, float }*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to { float, float }*
// CHECK1-NEXT: [[DOTREALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 0
// CHECK1-NEXT: [[DOTREAL:%.*]] = load float, float* [[DOTREALP]], align 4
// CHECK1-NEXT: [[DOTIMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 1
// CHECK1-NEXT: [[DOTIMAG:%.*]] = load float, float* [[DOTIMAGP]], align 4
// CHECK1-NEXT: [[DOTREALP2:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP8]], i32 0, i32 0
// CHECK1-NEXT: [[DOTREAL3:%.*]] = load float, float* [[DOTREALP2]], align 4
// CHECK1-NEXT: [[DOTIMAGP4:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP8]], i32 0, i32 1
// CHECK1-NEXT: [[DOTIMAG5:%.*]] = load float, float* [[DOTIMAGP4]], align 4
// CHECK1-NEXT: [[ADD_R:%.*]] = fadd float [[DOTREAL]], [[DOTREAL3]]
// CHECK1-NEXT: [[ADD_I:%.*]] = fadd float [[DOTIMAG]], [[DOTIMAG5]]
// CHECK1-NEXT: [[DOTREALP6:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 0
// CHECK1-NEXT: [[DOTIMAGP7:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 1
// CHECK1-NEXT: store float [[ADD_R]], float* [[DOTREALP6]], align 4
// CHECK1-NEXT: store float [[ADD_I]], float* [[DOTIMAGP7]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: [[SST:%.*]] = alloca [[STRUCT_SST:%.*]], align 4
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 128
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 128
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK1-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT: call void @_ZN3SSTIiEC1Ev(%struct.SST* nonnull align 4 dereferenceable(4) [[SST]])
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 128
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32*, [2 x %struct.S.0]*, %struct.S.0*, %struct.S.0*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], i32* [[T_VAR]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR5]]
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR5]]
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR5]]
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT: ret i32 [[TMP2]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2SSC2ERi
// CHECK1-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[C5:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[A]], align 8
// CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK1-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
// CHECK1-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
// CHECK1-NEXT: store i8 [[BF_CLEAR]], i8* [[B]], align 4
// CHECK1-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[A3]], i32** [[A2]], align 8
// CHECK1-NEXT: [[C6:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C6]], align 8
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[C5]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C5]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i32*, i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]], i32* [[TMP2]], i32* [[B4]], i32* [[TMP3]])
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[B4]], align 4
// CHECK1-NEXT: [[B7:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
// CHECK1-NEXT: [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
// CHECK1-NEXT: [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
// CHECK1-NEXT: [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
// CHECK1-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
// CHECK1-NEXT: store i8 [[BF_SET]], i8* [[B7]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[C5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK1-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK1-NEXT: store i32* [[TMP3]], i32** [[_TMP1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: store i32 0, i32* [[A2]], align 4
// CHECK1-NEXT: store i32* [[A2]], i32** [[_TMP3]], align 8
// CHECK1-NEXT: store i32 0, i32* [[B4]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK1-NEXT: store i32 0, i32* [[C5]], align 4
// CHECK1-NEXT: store i32* [[C5]], i32** [[_TMP6]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP3]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[B4]], align 4
// CHECK1-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP8]], -1
// CHECK1-NEXT: store i32 [[DEC]], i32* [[B4]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[DIV]], i32* [[TMP9]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i32* [[A2]] to i8*
// CHECK1-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i32* [[B4]] to i8*
// CHECK1-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i32* [[C5]] to i8*
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 3, i64 24, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[A2]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[B4]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK1-NEXT: store i32 [[ADD7]], i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[C5]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[TMP5]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[A2]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = atomicrmw add i32* [[TMP4]], i32 [[TMP27]] monotonic, align 4
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[B4]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP29]] monotonic, align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[C5]], align 4
// CHECK1-NEXT: [[TMP32:%.*]] = atomicrmw add i32* [[TMP5]], i32 [[TMP31]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[TMP23]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
// CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN3SSTIiEC1Ev
// CHECK1-SAME: (%struct.SST* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK1-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN3SSTIiEC2Ev(%struct.SST* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[T_VAR2:%.*]] = alloca i32, align 128
// CHECK1-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 128
// CHECK1-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK1-NEXT: [[T_VAR15:%.*]] = alloca i32, align 128
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: [[REF_TMP11:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[T_VAR2]], align 128
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: store i32 2147483647, i32* [[T_VAR15]], align 128
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[T_VAR2]], align 128
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %struct.S.0* [[ARRAYIDX6]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 128 [[TMP8]], i64 4, i1 false)
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i32* [[T_VAR2]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
// CHECK1-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
// CHECK1-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i32* [[T_VAR15]] to i8*
// CHECK1-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 4, i64 32, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP1]], align 128
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[T_VAR2]], align 128
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP1]], align 128
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL7:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL7]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL8:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL9:%.*]] = icmp ne i32 [[CALL8]], 0
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP25:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL9]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV:%.*]] = zext i1 [[TMP25]] to i32
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP5]], align 128
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[T_VAR15]], align 128
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP28]], [[TMP29]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP5]], align 128
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[T_VAR15]], align 128
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE]] ], [ [[TMP31]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[TMP5]], align 128
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[T_VAR2]], align 128
// CHECK1-NEXT: [[TMP33:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP32]] monotonic, align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL10:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[CALL10]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL12:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK1-NEXT: [[TOBOOL13:%.*]] = icmp ne i32 [[CALL12]], 0
// CHECK1-NEXT: br i1 [[TOBOOL13]], label [[LAND_RHS14:%.*]], label [[LAND_END17:%.*]]
// CHECK1: land.rhs14:
// CHECK1-NEXT: [[CALL15:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK1-NEXT: [[TOBOOL16:%.*]] = icmp ne i32 [[CALL15]], 0
// CHECK1-NEXT: br label [[LAND_END17]]
// CHECK1: land.end17:
// CHECK1-NEXT: [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL16]], [[LAND_RHS14]] ]
// CHECK1-NEXT: [[CONV18:%.*]] = zext i1 [[TMP36]] to i32
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP11]], i32 [[CONV18]])
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
// CHECK1-NEXT: [[TMP38:%.*]] = bitcast %struct.S.0* [[REF_TMP11]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP11]]) #[[ATTR5]]
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 128
// CHECK1-NEXT: [[TMP40:%.*]] = atomicrmw min i32* [[TMP5]], i32 [[TMP39]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR5]]
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR5]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
// CHECK1-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
// CHECK1-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 128
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 128
// CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
// CHECK1-NEXT: [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK1: land.rhs:
// CHECK1-NEXT: [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
// CHECK1-NEXT: br label [[LAND_END]]
// CHECK1: land.end:
// CHECK1-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
// CHECK1-NEXT: [[CONV:%.*]] = zext i1 [[TMP34]] to i32
// CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
// CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 128
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 128
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 128
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 128
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[TMP29]], align 128
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR0]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret %struct.S.0* [[THIS1]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR0]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN3SSTIiEC2Ev
// CHECK1-SAME: (%struct.SST* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK1-NEXT: [[A2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SST:%.*]], %struct.SST* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SST]], %struct.SST* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32* [[A3]], i32** [[A2]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A2]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SST* [[THIS1]], i32* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SST* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK1-NEXT: store i32 1, i32* [[A1]], align 4
// CHECK1-NEXT: store i32* [[A1]], i32** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.11, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[MUL]], i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP2]] monotonic, align 4
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP14:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP19:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: store i32 [[TMP14]], i32* [[_TMP3]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[_TMP3]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: store i32 [[MUL4]], i32* [[ATOMIC_TEMP]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[ATOMIC_TEMP]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP14]], i32 [[TMP17]] monotonic monotonic, align 4
// CHECK1-NEXT: [[TMP19]] = extractvalue { i32, i1 } [[TMP18]], 0
// CHECK1-NEXT: [[TMP20:%.*]] = extractvalue { i32, i1 } [[TMP18]], 1
// CHECK1-NEXT: br i1 [[TMP20]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.11
// CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[MUL]], i32* [[TMP11]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
// CHECK1-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z14foo_array_sectPs
// CHECK2-SAME: (i16* [[X:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i16*)* @.omp_outlined. to void (i32*, i32*, ...)*), i16* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i16* [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[_TMP13:%.*]] = alloca i16, align 2
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP0]], i64 0
// CHECK2-NEXT: [[TMP1:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[TMP1]], i64 0
// CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i16* [[ARRAYIDX1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1
// CHECK2-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP8:%.*]] = call i8* @llvm.stacksave()
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16
// CHECK2-NEXT: store i64 [[TMP6]], i64* [[__VLA_EXPR0]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP6]]
// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[VLA]], [[TMP9]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK2: omp.arrayinit.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK2-NEXT: store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK2: omp.arrayinit.done:
// CHECK2-NEXT: [[TMP10:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = ptrtoint i16* [[TMP10]] to i64
// CHECK2-NEXT: [[TMP12:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
// CHECK2-NEXT: [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP14]]
// CHECK2-NEXT: store i16* [[TMP15]], i16** [[TMP]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i16* [[VLA]] to i8*
// CHECK2-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK2-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP6]] to i8*
// CHECK2-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP21]], i32 1, i64 16, i8* [[TMP22]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP23]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP24]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE7:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST2:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT5:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP25:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP25]] to i32
// CHECK2-NEXT: [[TMP26:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP26]] to i32
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV3]]
// CHECK2-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i16
// CHECK2-NEXT: store i16 [[CONV4]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT5]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE6:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT5]], [[TMP24]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_DONE7]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done7:
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY8:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP27]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY8]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY9:%.*]]
// CHECK2: omp.arraycpy.body9:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST10:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[ATOMIC_EXIT]] ]
// CHECK2-NEXT: [[TMP28:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK2-NEXT: [[CONV12:%.*]] = sext i16 [[TMP28]] to i32
// CHECK2-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]] monotonic, align 2
// CHECK2-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK2: atomic_cont:
// CHECK2-NEXT: [[TMP29:%.*]] = phi i16 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY9]] ], [ [[TMP34:%.*]], [[ATOMIC_CONT]] ]
// CHECK2-NEXT: store i16 [[TMP29]], i16* [[_TMP13]], align 2
// CHECK2-NEXT: [[TMP30:%.*]] = load i16, i16* [[_TMP13]], align 2
// CHECK2-NEXT: [[CONV14:%.*]] = sext i16 [[TMP30]] to i32
// CHECK2-NEXT: [[TMP31:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK2-NEXT: [[CONV15:%.*]] = sext i16 [[TMP31]] to i32
// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[CONV14]], [[CONV15]]
// CHECK2-NEXT: [[CONV17:%.*]] = trunc i32 [[ADD16]] to i16
// CHECK2-NEXT: store i16 [[CONV17]], i16* [[ATOMIC_TEMP]], align 2
// CHECK2-NEXT: [[TMP32:%.*]] = load i16, i16* [[ATOMIC_TEMP]], align 2
// CHECK2-NEXT: [[TMP33:%.*]] = cmpxchg i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i16 [[TMP29]], i16 [[TMP32]] monotonic monotonic, align 2
// CHECK2-NEXT: [[TMP34]] = extractvalue { i16, i1 } [[TMP33]], 0
// CHECK2-NEXT: [[TMP35:%.*]] = extractvalue { i16, i1 } [[TMP33]], 1
// CHECK2-NEXT: br i1 [[TMP35]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK2: atomic_exit:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP27]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY9]]
// CHECK2: omp.arraycpy.done21:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: [[TMP36:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP36]])
// CHECK2-NEXT: ret void
//
//
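// Reduction combiner for the array-section '+' reduction: each [2 x i8*] list carries the
// private i16 array base in entry 0, and the element count recovered from entry 1 of the LHS
// list bounds the loop, which sign-extends each pair of elements, adds them, and stores the
// truncated sum back into the LHS array.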
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i16*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP11]], i64 [[TMP14]]
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP11]], [[TMP15]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP16:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP16]] to i32
// CHECK2-NEXT: [[TMP17:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK2-NEXT: [[CONV2:%.*]] = sext i16 [[TMP17]] to i32
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK2-NEXT: store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done4:
// CHECK2-NEXT: ret void
//
//
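// main: constructs ss, test, vec, s_arr, var and var1, forks .omp_outlined..1 for the parallel
// reduction over t_var/var/var1/t_var1, conditionally forks .omp_outlined..3 when var1 converts
// to a nonzero float, forks .omp_outlined..4 for the { float, float } reduction on cf, then
// returns tmain<int>() after running the destructor loop over s_arr and the other locals.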
// CHECK2-LABEL: define {{[^@]+}}@main
// CHECK2-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SS:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
// CHECK2-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK2-NEXT: [[T_VAR:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[T_VAR1:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4
// CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK2-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK2-NEXT: [[CF:%.*]] = alloca { float, float }, align 4
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK2-NEXT: call void @_ZN2SSC1ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[SS]], i32* nonnull align 4 dereferenceable(4) @sivar)
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK2-NEXT: store float 0.000000e+00, float* [[T_VAR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
// CHECK2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], float 1.000000e+00)
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], float 2.000000e+00)
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]], float 3.000000e+00)
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, float*, [2 x %struct.S]*, %struct.S*, %struct.S*, float*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], float* [[T_VAR]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]])
// CHECK2-NEXT: [[CALL:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL]], 0.000000e+00
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK2: if.then:
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, float*, [2 x %struct.S]*, %struct.S*, %struct.S*, float*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], float* [[T_VAR]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[VAR]], %struct.S* [[VAR1]], float* [[T_VAR1]])
// CHECK2-NEXT: br label [[IF_END]]
// CHECK2: if.end:
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { float, float }*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), { float, float }* [[CF]])
// CHECK2-NEXT: [[CALL1:%.*]] = call i32 @_Z5tmainIiET_v()
// CHECK2-NEXT: store i32 [[CALL1]], i32* [[RETVAL]], align 4
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR5:[0-9]+]]
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR5]]
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
// CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK2: arraydestroy.body:
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP1]], [[IF_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]]
// CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK2: arraydestroy.done2:
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR5]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK2-NEXT: ret i32 [[TMP2]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN2SSC1ERi
// CHECK2-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR7:[0-9]+]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK2-NEXT: call void @_ZN2SSC2ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[THIS1]], i32* nonnull align 4 dereferenceable(4) [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK2-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], float [[TMP0]])
// CHECK2-NEXT: ret void
//
//
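// Outlined parallel body for the combined reductions in main: private copies are initialized
// (t_var2 = 0.0 for '+', default-constructed S objects for '&' and '&&', t_var15 = FLT_MAX for
// 'min'), the body stores into vec and s_arr, and a 4-entry list is passed to
// __kmpc_reduce_nowait. Case 1 combines inline (fadd, operator&, a land-based '&&', fcmp/min);
// case 2 uses cmpxchg retry loops for the float reductions and critical sections for the S ones.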
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK2-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK2-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK2-NEXT: [[ATOMIC_TEMP:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[REF_TMP13:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK2-NEXT: [[ATOMIC_TEMP23:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[_TMP24:%.*]] = alloca float, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK2-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK2-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
// CHECK2-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
// CHECK2-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
// CHECK2-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP0]], i64 0, i64 0
// CHECK2-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP2]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 4, i1 false)
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast float* [[T_VAR2]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[VAR14]] to i8*
// CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast float* [[T_VAR15]] to i8*
// CHECK2-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 4, i64 32, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP21:%.*]] = load float, float* [[TMP1]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP21]], [[TMP22]]
// CHECK2-NEXT: store float [[ADD]], float* [[TMP1]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
// CHECK2-NEXT: [[TMP24:%.*]] = bitcast %struct.S* [[CALL]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
// CHECK2-NEXT: [[CALL7:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL7]], 0.000000e+00
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK2: land.rhs:
// CHECK2-NEXT: [[CALL8:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: [[TOBOOL9:%.*]] = fcmp une float [[CALL8]], 0.000000e+00
// CHECK2-NEXT: br label [[LAND_END]]
// CHECK2: land.end:
// CHECK2-NEXT: [[TMP25:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL9]], [[LAND_RHS]] ]
// CHECK2-NEXT: [[CONV10:%.*]] = uitofp i1 [[TMP25]] to float
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV10]])
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false)
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK2-NEXT: [[TMP28:%.*]] = load float, float* [[TMP5]], align 4
// CHECK2-NEXT: [[TMP29:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP28]], [[TMP29]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP30:%.*]] = load float, float* [[TMP5]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP31:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi float [ [[TMP30]], [[COND_TRUE]] ], [ [[TMP31]], [[COND_FALSE]] ]
// CHECK2-NEXT: store float [[COND]], float* [[TMP5]], align 4
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP32:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK2-NEXT: [[TMP33:%.*]] = bitcast float* [[TMP1]] to i32*
// CHECK2-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP33]] monotonic, align 4
// CHECK2-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK2: atomic_cont:
// CHECK2-NEXT: [[TMP34:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP42:%.*]], [[ATOMIC_CONT]] ]
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast float* [[ATOMIC_TEMP]] to i32*
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast i32 [[TMP34]] to float
// CHECK2-NEXT: store float [[TMP36]], float* [[TMP]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = load float, float* [[TMP]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK2-NEXT: [[ADD11:%.*]] = fadd float [[TMP37]], [[TMP38]]
// CHECK2-NEXT: store float [[ADD11]], float* [[ATOMIC_TEMP]], align 4
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK2-NEXT: [[TMP40:%.*]] = bitcast float* [[TMP1]] to i32*
// CHECK2-NEXT: [[TMP41:%.*]] = cmpxchg i32* [[TMP40]], i32 [[TMP34]], i32 [[TMP39]] monotonic monotonic, align 4
// CHECK2-NEXT: [[TMP42]] = extractvalue { i32, i1 } [[TMP41]], 0
// CHECK2-NEXT: [[TMP43:%.*]] = extractvalue { i32, i1 } [[TMP41]], 1
// CHECK2-NEXT: br i1 [[TMP43]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK2: atomic_exit:
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: [[TMP44:%.*]] = bitcast %struct.S* [[TMP3]] to i8*
// CHECK2-NEXT: [[TMP45:%.*]] = bitcast %struct.S* [[CALL12]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP44]], i8* align 4 [[TMP45]], i64 4, i1 false)
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: [[CALL14:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK2-NEXT: [[TOBOOL15:%.*]] = fcmp une float [[CALL14]], 0.000000e+00
// CHECK2-NEXT: br i1 [[TOBOOL15]], label [[LAND_RHS16:%.*]], label [[LAND_END19:%.*]]
// CHECK2: land.rhs16:
// CHECK2-NEXT: [[CALL17:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: [[TOBOOL18:%.*]] = fcmp une float [[CALL17]], 0.000000e+00
// CHECK2-NEXT: br label [[LAND_END19]]
// CHECK2: land.end19:
// CHECK2-NEXT: [[TMP46:%.*]] = phi i1 [ false, [[ATOMIC_EXIT]] ], [ [[TOBOOL18]], [[LAND_RHS16]] ]
// CHECK2-NEXT: [[CONV20:%.*]] = uitofp i1 [[TMP46]] to float
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP13]], float [[CONV20]])
// CHECK2-NEXT: [[TMP47:%.*]] = bitcast %struct.S* [[TMP4]] to i8*
// CHECK2-NEXT: [[TMP48:%.*]] = bitcast %struct.S* [[REF_TMP13]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP47]], i8* align 4 [[TMP48]], i64 4, i1 false)
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP13]]) #[[ATTR5]]
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: [[TMP49:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK2-NEXT: [[TMP50:%.*]] = bitcast float* [[TMP5]] to i32*
// CHECK2-NEXT: [[ATOMIC_LOAD21:%.*]] = load atomic i32, i32* [[TMP50]] monotonic, align 4
// CHECK2-NEXT: br label [[ATOMIC_CONT22:%.*]]
// CHECK2: atomic_cont22:
// CHECK2-NEXT: [[TMP51:%.*]] = phi i32 [ [[ATOMIC_LOAD21]], [[LAND_END19]] ], [ [[TMP61:%.*]], [[COND_END28:%.*]] ]
// CHECK2-NEXT: [[TMP52:%.*]] = bitcast float* [[ATOMIC_TEMP23]] to i32*
// CHECK2-NEXT: [[TMP53:%.*]] = bitcast i32 [[TMP51]] to float
// CHECK2-NEXT: store float [[TMP53]], float* [[_TMP24]], align 4
// CHECK2-NEXT: [[TMP54:%.*]] = load float, float* [[_TMP24]], align 4
// CHECK2-NEXT: [[TMP55:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK2-NEXT: [[CMP25:%.*]] = fcmp olt float [[TMP54]], [[TMP55]]
// CHECK2-NEXT: br i1 [[CMP25]], label [[COND_TRUE26:%.*]], label [[COND_FALSE27:%.*]]
// CHECK2: cond.true26:
// CHECK2-NEXT: [[TMP56:%.*]] = load float, float* [[_TMP24]], align 4
// CHECK2-NEXT: br label [[COND_END28]]
// CHECK2: cond.false27:
// CHECK2-NEXT: [[TMP57:%.*]] = load float, float* [[T_VAR15]], align 4
// CHECK2-NEXT: br label [[COND_END28]]
// CHECK2: cond.end28:
// CHECK2-NEXT: [[COND29:%.*]] = phi float [ [[TMP56]], [[COND_TRUE26]] ], [ [[TMP57]], [[COND_FALSE27]] ]
// CHECK2-NEXT: store float [[COND29]], float* [[ATOMIC_TEMP23]], align 4
// CHECK2-NEXT: [[TMP58:%.*]] = load i32, i32* [[TMP52]], align 4
// CHECK2-NEXT: [[TMP59:%.*]] = bitcast float* [[TMP5]] to i32*
// CHECK2-NEXT: [[TMP60:%.*]] = cmpxchg i32* [[TMP59]], i32 [[TMP51]], i32 [[TMP58]] monotonic monotonic, align 4
// CHECK2-NEXT: [[TMP61]] = extractvalue { i32, i1 } [[TMP60]], 0
// CHECK2-NEXT: [[TMP62:%.*]] = extractvalue { i32, i1 } [[TMP60]], 1
// CHECK2-NEXT: br i1 [[TMP62]], label [[ATOMIC_EXIT30:%.*]], label [[ATOMIC_CONT22]]
// CHECK2: atomic_exit30:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR5]]
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR5]]
// CHECK2-NEXT: ret void
//
//
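// Combiner matching the 4-entry list from .omp_outlined..1: fadd for t_var, operator&
// (_ZN1SIfEanERKS0_) for var, a land-based '&&' materialized through a temporary S for var1,
// and an fcmp olt min for t_var1, each result stored back into the LHS entry.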
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to float*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to float*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S*
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S*
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK2-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S*
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK2-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S*
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
// CHECK2-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to float*
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
// CHECK2-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to float*
// CHECK2-NEXT: [[TMP30:%.*]] = load float, float* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP31:%.*]] = load float, float* [[TMP8]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP30]], [[TMP31]]
// CHECK2-NEXT: store float [[ADD]], float* [[TMP11]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SIfEanERKS0_(%struct.S* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP14]])
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast %struct.S* [[TMP17]] to i8*
// CHECK2-NEXT: [[TMP33:%.*]] = bitcast %struct.S* [[CALL]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
// CHECK2-NEXT: [[CALL2:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP23]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = fcmp une float [[CALL2]], 0.000000e+00
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK2: land.rhs:
// CHECK2-NEXT: [[CALL3:%.*]] = call float @_ZN1SIfEcvfEv(%struct.S* nonnull align 4 dereferenceable(4) [[TMP20]])
// CHECK2-NEXT: [[TOBOOL4:%.*]] = fcmp une float [[CALL3]], 0.000000e+00
// CHECK2-NEXT: br label [[LAND_END]]
// CHECK2: land.end:
// CHECK2-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
// CHECK2-NEXT: [[CONV:%.*]] = uitofp i1 [[TMP34]] to float
// CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]], float [[CONV]])
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S* [[TMP23]] to i8*
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast %struct.S* [[REF_TMP]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
// CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK2-NEXT: [[TMP37:%.*]] = load float, float* [[TMP29]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = load float, float* [[TMP26]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = fcmp olt float [[TMP37]], [[TMP38]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP39:%.*]] = load float, float* [[TMP29]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP40:%.*]] = load float, float* [[TMP26]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi float [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
// CHECK2-NEXT: store float [[COND]], float* [[TMP29]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEanERKS0_
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR0]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: ret %struct.S* [[THIS1]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEcvfEv
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR0]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: ret float 0.000000e+00
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]]
// CHECK2-NEXT: ret void
//
//
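// Outlined body forked from the if-branch of main: it sets up the same private reduction copies
// as .omp_outlined..1, and the generated checks stop inside the while loop that stores into vec
// and s_arr.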
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], float* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[T_VAR1_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[T_VAR2:%.*]] = alloca float, align 4
// CHECK2-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK2-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S]], align 4
// CHECK2-NEXT: [[T_VAR15:%.*]] = alloca float, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK2-NEXT: store float* [[T_VAR]], float** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK2-NEXT: store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
// CHECK2-NEXT: store %struct.S* [[VAR1]], %struct.S** [[VAR1_ADDR]], align 8
// CHECK2-NEXT: store float* [[T_VAR1]], float** [[T_VAR1_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load float*, float** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load %struct.S*, %struct.S** [[VAR1_ADDR]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load float*, float** [[T_VAR1_ADDR]], align 8
// CHECK2-NEXT: store float 0.000000e+00, float* [[T_VAR2]], align 4
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: store float 0x47EFFFFFE0000000, float* [[T_VAR15]], align 4
// CHECK2-NEXT: br label [[WHILE_COND:%.*]]
// CHECK2: while.cond:
// CHECK2-NEXT: br label [[WHILE_BODY:%.*]]
// CHECK2: while.body:
// CHECK2-NEXT: [[TMP6:%.*]] = load float, float* [[T_VAR2]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP0]], i64 0, i64 0
// CHECK2-NEXT: store i32 [[CONV]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP2]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 4, i1 false)
// CHECK2-NEXT: br label [[WHILE_COND]], !llvm.loop [[LOOP4:![0-9]+]]
//
//
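// Outlined body for the { float, float } reduction on cf: the private pair is zero-initialized,
// a single-entry list goes to __kmpc_reduce_nowait, case 1 adds the real and imaginary parts
// into the shared value in place, and case 2 retries through __atomic_load /
// __atomic_compare_exchange.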
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], { float, float }* nonnull align 4 dereferenceable(8) [[CF:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[CF_ADDR:%.*]] = alloca { float, float }*, align 8
// CHECK2-NEXT: [[CF1:%.*]] = alloca { float, float }, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[ATOMIC_TEMP:%.*]] = alloca { float, float }, align 4
// CHECK2-NEXT: [[ATOMIC_TEMP10:%.*]] = alloca { float, float }, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca { float, float }, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store { float, float }* [[CF]], { float, float }** [[CF_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load { float, float }*, { float, float }** [[CF_ADDR]], align 8
// CHECK2-NEXT: [[CF1_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK2-NEXT: [[CF1_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK2-NEXT: store float 0.000000e+00, float* [[CF1_REALP]], align 4
// CHECK2-NEXT: store float 0.000000e+00, float* [[CF1_IMAGP]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast { float, float }* [[CF1]] to i8*
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[TMP1]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i64 8, i8* [[TMP5]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP6]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[DOTREALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[DOTREAL:%.*]] = load float, float* [[DOTREALP]], align 4
// CHECK2-NEXT: [[DOTIMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[DOTIMAG:%.*]] = load float, float* [[DOTIMAGP]], align 4
// CHECK2-NEXT: [[CF1_REALP2:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK2-NEXT: [[CF1_REAL:%.*]] = load float, float* [[CF1_REALP2]], align 4
// CHECK2-NEXT: [[CF1_IMAGP3:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK2-NEXT: [[CF1_IMAG:%.*]] = load float, float* [[CF1_IMAGP3]], align 4
// CHECK2-NEXT: [[ADD_R:%.*]] = fadd float [[DOTREAL]], [[CF1_REAL]]
// CHECK2-NEXT: [[ADD_I:%.*]] = fadd float [[DOTIMAG]], [[CF1_IMAG]]
// CHECK2-NEXT: [[DOTREALP4:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[DOTIMAGP5:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: store float [[ADD_R]], float* [[DOTREALP4]], align 4
// CHECK2-NEXT: store float [[ADD_I]], float* [[DOTIMAGP5]], align 4
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[CF1_REALP6:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK2-NEXT: [[CF1_REAL7:%.*]] = load float, float* [[CF1_REALP6]], align 4
// CHECK2-NEXT: [[CF1_IMAGP8:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK2-NEXT: [[CF1_IMAG9:%.*]] = load float, float* [[CF1_IMAGP8]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast { float, float }* [[TMP0]] to i8*
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast { float, float }* [[ATOMIC_TEMP]] to i8*
// CHECK2-NEXT: call void @__atomic_load(i64 8, i8* [[TMP7]], i8* [[TMP8]], i32 0)
// CHECK2-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK2: atomic_cont:
// CHECK2-NEXT: [[ATOMIC_TEMP_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP]], i32 0, i32 0
// CHECK2-NEXT: [[ATOMIC_TEMP_REAL:%.*]] = load float, float* [[ATOMIC_TEMP_REALP]], align 4
// CHECK2-NEXT: [[ATOMIC_TEMP_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP]], i32 0, i32 1
// CHECK2-NEXT: [[ATOMIC_TEMP_IMAG:%.*]] = load float, float* [[ATOMIC_TEMP_IMAGP]], align 4
// CHECK2-NEXT: [[TMP_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 0
// CHECK2-NEXT: [[TMP_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 1
// CHECK2-NEXT: store float [[ATOMIC_TEMP_REAL]], float* [[TMP_REALP]], align 4
// CHECK2-NEXT: store float [[ATOMIC_TEMP_IMAG]], float* [[TMP_IMAGP]], align 4
// CHECK2-NEXT: [[TMP_REALP11:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 0
// CHECK2-NEXT: [[TMP_REAL:%.*]] = load float, float* [[TMP_REALP11]], align 4
// CHECK2-NEXT: [[TMP_IMAGP12:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP]], i32 0, i32 1
// CHECK2-NEXT: [[TMP_IMAG:%.*]] = load float, float* [[TMP_IMAGP12]], align 4
// CHECK2-NEXT: [[CF1_REALP13:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 0
// CHECK2-NEXT: [[CF1_REAL14:%.*]] = load float, float* [[CF1_REALP13]], align 4
// CHECK2-NEXT: [[CF1_IMAGP15:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[CF1]], i32 0, i32 1
// CHECK2-NEXT: [[CF1_IMAG16:%.*]] = load float, float* [[CF1_IMAGP15]], align 4
// CHECK2-NEXT: [[ADD_R17:%.*]] = fadd float [[TMP_REAL]], [[CF1_REAL14]]
// CHECK2-NEXT: [[ADD_I18:%.*]] = fadd float [[TMP_IMAG]], [[CF1_IMAG16]]
// CHECK2-NEXT: [[ATOMIC_TEMP10_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP10]], i32 0, i32 0
// CHECK2-NEXT: [[ATOMIC_TEMP10_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[ATOMIC_TEMP10]], i32 0, i32 1
// CHECK2-NEXT: store float [[ADD_R17]], float* [[ATOMIC_TEMP10_REALP]], align 4
// CHECK2-NEXT: store float [[ADD_I18]], float* [[ATOMIC_TEMP10_IMAGP]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast { float, float }* [[TMP0]] to i8*
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast { float, float }* [[ATOMIC_TEMP]] to i8*
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast { float, float }* [[ATOMIC_TEMP10]] to i8*
// CHECK2-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* [[TMP9]], i8* [[TMP10]], i8* [[TMP11]], i32 0, i32 0)
// CHECK2-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK2: atomic_exit:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
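// Combiner for the { float, float } reduction: component-wise fadd of the RHS real and imaginary
// parts into the LHS element.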
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to { float, float }*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to { float, float }*
// CHECK2-NEXT: [[DOTREALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 0
// CHECK2-NEXT: [[DOTREAL:%.*]] = load float, float* [[DOTREALP]], align 4
// CHECK2-NEXT: [[DOTIMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 1
// CHECK2-NEXT: [[DOTIMAG:%.*]] = load float, float* [[DOTIMAGP]], align 4
// CHECK2-NEXT: [[DOTREALP2:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP8]], i32 0, i32 0
// CHECK2-NEXT: [[DOTREAL3:%.*]] = load float, float* [[DOTREALP2]], align 4
// CHECK2-NEXT: [[DOTIMAGP4:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP8]], i32 0, i32 1
// CHECK2-NEXT: [[DOTIMAG5:%.*]] = load float, float* [[DOTIMAGP4]], align 4
// CHECK2-NEXT: [[ADD_R:%.*]] = fadd float [[DOTREAL]], [[DOTREAL3]]
// CHECK2-NEXT: [[ADD_I:%.*]] = fadd float [[DOTIMAG]], [[DOTIMAG5]]
// CHECK2-NEXT: [[DOTREALP6:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 0
// CHECK2-NEXT: [[DOTIMAGP7:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[TMP11]], i32 0, i32 1
// CHECK2-NEXT: store float [[ADD_R]], float* [[DOTREALP6]], align 4
// CHECK2-NEXT: store float [[ADD_I]], float* [[DOTIMAGP7]], align 4
// CHECK2-NEXT: ret void
//
//
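// Integer instantiation of tmain: t_var, t_var1, var and var1 are allocated with 128-byte
// alignment, and the parallel reduction region is forked through .omp_outlined..8.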
// CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK2-SAME: () #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[T:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK2-NEXT: [[SST:%.*]] = alloca [[STRUCT_SST:%.*]], align 4
// CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 128
// CHECK2-NEXT: [[T_VAR1:%.*]] = alloca i32, align 128
// CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
// CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK2-NEXT: [[VAR1:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK2-NEXT: call void @_ZN3SSTIiEC1Ev(%struct.SST* nonnull align 4 dereferenceable(4) [[SST]])
// CHECK2-NEXT: store i32 0, i32* [[T_VAR]], align 128
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1)
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2)
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]], i32 3)
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]])
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32*, [2 x %struct.S.0]*, %struct.S.0*, %struct.S.0*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], i32* [[T_VAR]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]], %struct.S.0* [[VAR1]], i32* [[T_VAR1]])
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1]]) #[[ATTR5]]
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR5]]
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
// CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK2: arraydestroy.body:
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]]
// CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
// CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK2: arraydestroy.done1:
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR5]]
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK2-NEXT: ret i32 [[TMP2]]
//
//
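// SS constructor body: a, the 4-bit bitfield b and the reference member c are initialized, their
// addresses (with b privatized into B4) are passed to .omp_outlined..6, and the reduced B4 value
// is packed back into the bitfield afterwards.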
// CHECK2-LABEL: define {{[^@]+}}@_ZN2SSC2ERi
// CHECK2-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A2:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[C5:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: store i32 0, i32* [[A]], align 8
// CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK2-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
// CHECK2-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
// CHECK2-NEXT: store i8 [[BF_CLEAR]], i8* [[B]], align 4
// CHECK2-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[A3]], i32** [[A2]], align 8
// CHECK2-NEXT: [[C6:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C6]], align 8
// CHECK2-NEXT: store i32* [[TMP1]], i32** [[C5]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C5]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i32*, i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]], i32* [[TMP2]], i32* [[B4]], i32* [[TMP3]])
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[B4]], align 4
// CHECK2-NEXT: [[B7:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK2-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
// CHECK2-NEXT: [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
// CHECK2-NEXT: [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
// CHECK2-NEXT: [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
// CHECK2-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
// CHECK2-NEXT: store i8 [[BF_SET]], i8* [[B7]], align 4
// CHECK2-NEXT: ret void
//
//
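// Outlined body for the member reduction inside the SS constructor: private copies of a, b and c
// are created (a and c reached through reference temporaries), the ++a / --b / c /= 1 body runs
// on the privates, and a 3-entry list is handed to __kmpc_reduce_nowait.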
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A2:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[C5:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK2-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK2-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK2-NEXT: store i32* [[TMP3]], i32** [[_TMP1]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK2-NEXT: store i32 0, i32* [[A2]], align 4
// CHECK2-NEXT: store i32* [[A2]], i32** [[_TMP3]], align 8
// CHECK2-NEXT: store i32 0, i32* [[B4]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK2-NEXT: store i32 0, i32* [[C5]], align 4
// CHECK2-NEXT: store i32* [[C5]], i32** [[_TMP6]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP3]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[B4]], align 4
// CHECK2-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP8]], -1
// CHECK2-NEXT: store i32 [[DEC]], i32* [[B4]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 1
// CHECK2-NEXT: store i32 [[DIV]], i32* [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[A2]] to i8*
// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[B4]] to i8*
// CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i32* [[C5]] to i8*
// CHECK2-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 3, i64 24, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[A2]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[B4]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK2-NEXT: store i32 [[ADD7]], i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[C5]], align 4
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
// CHECK2-NEXT: store i32 [[ADD8]], i32* [[TMP5]], align 4
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[A2]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = atomicrmw add i32* [[TMP4]], i32 [[TMP27]] monotonic, align 4
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[B4]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP29]] monotonic, align 4
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[C5]], align 4
// CHECK2-NEXT: [[TMP32:%.*]] = atomicrmw add i32* [[TMP5]], i32 [[TMP31]] monotonic, align 4
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK2-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK2-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK2-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[TMP23]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
// CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK2-NEXT: store float [[CONV]], float* [[F]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
// CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
// CHECK2-NEXT: store float [[ADD]], float* [[F]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN3SSTIiEC1Ev
// CHECK2-SAME: (%struct.SST* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK2-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void @_ZN3SSTIiEC2Ev(%struct.SST* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR1:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR1:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: [[VAR1_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: [[T_VAR1_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[T_VAR2:%.*]] = alloca i32, align 128
// CHECK2-NEXT: [[VAR3:%.*]] = alloca [[STRUCT_S_0:%.*]], align 128
// CHECK2-NEXT: [[VAR14:%.*]] = alloca [[STRUCT_S_0]], align 128
// CHECK2-NEXT: [[T_VAR15:%.*]] = alloca i32, align 128
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [4 x i8*], align 8
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK2-NEXT: [[REF_TMP11:%.*]] = alloca [[STRUCT_S_0]], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
// CHECK2-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK2-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
// CHECK2-NEXT: store %struct.S.0* [[VAR1]], %struct.S.0** [[VAR1_ADDR]], align 8
// CHECK2-NEXT: store i32* [[T_VAR1]], i32** [[T_VAR1_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR1_ADDR]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[T_VAR1_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[T_VAR2]], align 128
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: store i32 2147483647, i32* [[T_VAR15]], align 128
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[T_VAR2]], align 128
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP0]], i64 0, i64 0
// CHECK2-NEXT: store i32 [[TMP6]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP2]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast %struct.S.0* [[ARRAYIDX6]] to i8*
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 128 [[TMP8]], i64 4, i1 false)
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i32* [[T_VAR2]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast %struct.S.0* [[VAR3]] to i8*
// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast %struct.S.0* [[VAR14]] to i8*
// CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 3
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i32* [[T_VAR15]] to i8*
// CHECK2-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = bitcast [4 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 4, i64 32, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP1]], align 128
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[T_VAR2]], align 128
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP1]], align 128
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
// CHECK2-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false)
// CHECK2-NEXT: [[CALL7:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL7]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK2: land.rhs:
// CHECK2-NEXT: [[CALL8:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: [[TOBOOL9:%.*]] = icmp ne i32 [[CALL8]], 0
// CHECK2-NEXT: br label [[LAND_END]]
// CHECK2: land.end:
// CHECK2-NEXT: [[TMP25:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL9]], [[LAND_RHS]] ]
// CHECK2-NEXT: [[CONV:%.*]] = zext i1 [[TMP25]] to i32
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false)
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP5]], align 128
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[T_VAR15]], align 128
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP28]], [[TMP29]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP5]], align 128
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[T_VAR15]], align 128
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP30]], [[COND_TRUE]] ], [ [[TMP31]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[TMP5]], align 128
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[T_VAR2]], align 128
// CHECK2-NEXT: [[TMP33:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP32]] monotonic, align 4
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: [[CALL10:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP3]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]])
// CHECK2-NEXT: [[TMP34:%.*]] = bitcast %struct.S.0* [[TMP3]] to i8*
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[CALL10]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP34]], i8* align 4 [[TMP35]], i64 4, i1 false)
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: [[CALL12:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP4]])
// CHECK2-NEXT: [[TOBOOL13:%.*]] = icmp ne i32 [[CALL12]], 0
// CHECK2-NEXT: br i1 [[TOBOOL13]], label [[LAND_RHS14:%.*]], label [[LAND_END17:%.*]]
// CHECK2: land.rhs14:
// CHECK2-NEXT: [[CALL15:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]])
// CHECK2-NEXT: [[TOBOOL16:%.*]] = icmp ne i32 [[CALL15]], 0
// CHECK2-NEXT: br label [[LAND_END17]]
// CHECK2: land.end17:
// CHECK2-NEXT: [[TMP36:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE2]] ], [ [[TOBOOL16]], [[LAND_RHS14]] ]
// CHECK2-NEXT: [[CONV18:%.*]] = zext i1 [[TMP36]] to i32
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP11]], i32 [[CONV18]])
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast %struct.S.0* [[TMP4]] to i8*
// CHECK2-NEXT: [[TMP38:%.*]] = bitcast %struct.S.0* [[REF_TMP11]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP37]], i8* align 4 [[TMP38]], i64 4, i1 false)
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP11]]) #[[ATTR5]]
// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[T_VAR15]], align 128
// CHECK2-NEXT: [[TMP40:%.*]] = atomicrmw min i32* [[TMP5]], i32 [[TMP39]] monotonic, align 4
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR14]]) #[[ATTR5]]
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[VAR3]]) #[[ATTR5]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [4 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [4 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %struct.S.0*
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to %struct.S.0*
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK2-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to %struct.S.0*
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK2-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to %struct.S.0*
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP5]], i64 0, i64 3
// CHECK2-NEXT: [[TMP25:%.*]] = load i8*, i8** [[TMP24]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8* [[TMP25]] to i32*
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[TMP3]], i64 0, i64 3
// CHECK2-NEXT: [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8
// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8* [[TMP28]] to i32*
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP8]], align 128
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 128
// CHECK2-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S.0* @_ZN1SIiEanERKS0_(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP17]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP14]])
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast %struct.S.0* [[TMP17]] to i8*
// CHECK2-NEXT: [[TMP33:%.*]] = bitcast %struct.S.0* [[CALL]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP32]], i8* align 4 [[TMP33]], i64 4, i1 false)
// CHECK2-NEXT: [[CALL2:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP23]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[CALL2]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK2: land.rhs:
// CHECK2-NEXT: [[CALL3:%.*]] = call i32 @_ZN1SIiEcviEv(%struct.S.0* nonnull align 4 dereferenceable(4) [[TMP20]])
// CHECK2-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[CALL3]], 0
// CHECK2-NEXT: br label [[LAND_END]]
// CHECK2: land.end:
// CHECK2-NEXT: [[TMP34:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL4]], [[LAND_RHS]] ]
// CHECK2-NEXT: [[CONV:%.*]] = zext i1 [[TMP34]] to i32
// CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]], i32 [[CONV]])
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast %struct.S.0* [[TMP23]] to i8*
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast %struct.S.0* [[REF_TMP]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
// CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR5]]
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP29]], align 128
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[TMP26]], align 128
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[TMP29]], align 128
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[TMP26]], align 128
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE]] ], [ [[TMP40]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[TMP29]], align 128
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEanERKS0_
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR0]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store %struct.S.0* [[TMP0]], %struct.S.0** [[DOTADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: ret %struct.S.0* [[THIS1]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEcviEv
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) #[[ATTR0]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: ret i32 0
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 128
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[F]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN3SSTIiEC2Ev
// CHECK2-SAME: (%struct.SST* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK2-NEXT: [[A2:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SST:%.*]], %struct.SST* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SST]], %struct.SST* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: store i32* [[A3]], i32** [[A2]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A2]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SST* [[THIS1]], i32* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SST* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK2-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK2-NEXT: store i32 1, i32* [[A1]], align 4
// CHECK2-NEXT: store i32* [[A1]], i32** [[_TMP2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.11, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], [[TMP12]]
// CHECK2-NEXT: store i32 [[MUL]], i32* [[TMP2]], align 4
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, i32* [[TMP2]] monotonic, align 4
// CHECK2-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK2: atomic_cont:
// CHECK2-NEXT: [[TMP14:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP19:%.*]], [[ATOMIC_CONT]] ]
// CHECK2-NEXT: store i32 [[TMP14]], i32* [[_TMP3]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[_TMP3]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[MUL4:%.*]] = mul nsw i32 [[TMP15]], [[TMP16]]
// CHECK2-NEXT: store i32 [[MUL4]], i32* [[ATOMIC_TEMP]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[ATOMIC_TEMP]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP14]], i32 [[TMP17]] monotonic monotonic, align 4
// CHECK2-NEXT: [[TMP19]] = extractvalue { i32, i1 } [[TMP18]], 0
// CHECK2-NEXT: [[TMP20:%.*]] = extractvalue { i32, i1 } [[TMP18]], 1
// CHECK2-NEXT: br i1 [[TMP20]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK2: atomic_exit:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.11
// CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[MUL]], i32* [[TMP11]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 128
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[F]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
// CHECK2-SAME: (%struct.S.0* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z14foo_array_sectPs
// CHECK3-SAME: (i16* [[X:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK3-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i16*)* @.omp_outlined. to void (i32*, i32*, ...)*), i16* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i16* [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i16*, align 8
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[_TMP13:%.*]] = alloca i16, align 2
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP0]], i64 0
// CHECK3-NEXT: [[TMP1:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK3-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[TMP1]], i64 0
// CHECK3-NEXT: [[TMP2:%.*]] = ptrtoint i16* [[ARRAYIDX1]] to i64
// CHECK3-NEXT: [[TMP3:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK3-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK3-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK3-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1
// CHECK3-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK3-NEXT: [[TMP8:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16
// CHECK3-NEXT: store i64 [[TMP6]], i64* [[__VLA_EXPR0]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP6]]
// CHECK3-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[VLA]], [[TMP9]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK3: omp.arrayinit.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK3-NEXT: store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK3: omp.arrayinit.done:
// CHECK3-NEXT: [[TMP10:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = ptrtoint i16* [[TMP10]] to i64
// CHECK3-NEXT: [[TMP12:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK3-NEXT: [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
// CHECK3-NEXT: [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP14]]
// CHECK3-NEXT: store i16* [[TMP15]], i16** [[TMP]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i16* [[VLA]] to i8*
// CHECK3-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK3-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP6]] to i8*
// CHECK3-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK3-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP21]], i32 1, i64 16, i8* [[TMP22]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP23]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP24]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE7:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK3: omp.arraycpy.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST2:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT5:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[TMP25:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP25]] to i32
// CHECK3-NEXT: [[TMP26:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP26]] to i32
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV3]]
// CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i16
// CHECK3-NEXT: store i16 [[CONV4]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT5]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE6:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT5]], [[TMP24]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_DONE7]], label [[OMP_ARRAYCPY_BODY]]
// CHECK3: omp.arraycpy.done7:
// CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY8:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP27]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY8]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY9:%.*]]
// CHECK3: omp.arraycpy.body9:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST10:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[ATOMIC_EXIT]] ]
// CHECK3-NEXT: [[TMP28:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK3-NEXT: [[CONV12:%.*]] = sext i16 [[TMP28]] to i32
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]] monotonic, align 2
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK3: atomic_cont:
// CHECK3-NEXT: [[TMP29:%.*]] = phi i16 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY9]] ], [ [[TMP34:%.*]], [[ATOMIC_CONT]] ]
// CHECK3-NEXT: store i16 [[TMP29]], i16* [[_TMP13]], align 2
// CHECK3-NEXT: [[TMP30:%.*]] = load i16, i16* [[_TMP13]], align 2
// CHECK3-NEXT: [[CONV14:%.*]] = sext i16 [[TMP30]] to i32
// CHECK3-NEXT: [[TMP31:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK3-NEXT: [[CONV15:%.*]] = sext i16 [[TMP31]] to i32
// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[CONV14]], [[CONV15]]
// CHECK3-NEXT: [[CONV17:%.*]] = trunc i32 [[ADD16]] to i16
// CHECK3-NEXT: store i16 [[CONV17]], i16* [[ATOMIC_TEMP]], align 2
// CHECK3-NEXT: [[TMP32:%.*]] = load i16, i16* [[ATOMIC_TEMP]], align 2
// CHECK3-NEXT: [[TMP33:%.*]] = cmpxchg i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i16 [[TMP29]], i16 [[TMP32]] monotonic monotonic, align 2
// CHECK3-NEXT: [[TMP34]] = extractvalue { i16, i1 } [[TMP33]], 0
// CHECK3-NEXT: [[TMP35:%.*]] = extractvalue { i16, i1 } [[TMP33]], 1
// CHECK3-NEXT: br i1 [[TMP35]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK3: atomic_exit:
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP27]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY9]]
// CHECK3: omp.arraycpy.done21:
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: [[TMP36:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP36]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i16*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16*
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP11]], i64 [[TMP14]]
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP11]], [[TMP15]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK3: omp.arraycpy.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[TMP16:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP16]] to i32
// CHECK3-NEXT: [[TMP17:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK3-NEXT: [[CONV2:%.*]] = sext i16 [[TMP17]] to i32
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
// CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK3-NEXT: store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
// CHECK3: omp.arraycpy.done4:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SS:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT: call void @_ZN2SSC1ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[SS]], i32* nonnull align 4 dereferenceable(4) @sivar)
// CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK3-NEXT: ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN2SSC1ERi
// CHECK3-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR7:[0-9]+]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK3-NEXT: call void @_ZN2SSC2ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[THIS1]], i32* nonnull align 4 dereferenceable(4) [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN2SSC2ERi
// CHECK3-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR7]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[A2:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C5:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: store i32 0, i32* [[A]], align 8
// CHECK3-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK3-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
// CHECK3-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
// CHECK3-NEXT: store i8 [[BF_CLEAR]], i8* [[B]], align 4
// CHECK3-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK3-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[A3]], i32** [[A2]], align 8
// CHECK3-NEXT: [[C6:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C6]], align 8
// CHECK3-NEXT: store i32* [[TMP1]], i32** [[C5]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A2]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C5]], align 8
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]], i32* [[TMP2]], i32* [[B4]], i32* [[TMP3]])
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B4]], align 4
// CHECK3-NEXT: [[B7:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK3-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
// CHECK3-NEXT: [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
// CHECK3-NEXT: [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
// CHECK3-NEXT: [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
// CHECK3-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
// CHECK3-NEXT: store i8 [[BF_SET]], i8* [[B7]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[A2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP3:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C5:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[TMP3]], i32** [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK3-NEXT: store i32 0, i32* [[A2]], align 4
// CHECK3-NEXT: store i32* [[A2]], i32** [[_TMP3]], align 8
// CHECK3-NEXT: store i32 0, i32* [[B4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK3-NEXT: store i32 0, i32* [[C5]], align 4
// CHECK3-NEXT: store i32* [[C5]], i32** [[_TMP6]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store %struct.SS* [[TMP0]], %struct.SS** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1
// CHECK3-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP3]], align 8
// CHECK3-NEXT: store i32* [[TMP8]], i32** [[TMP7]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2
// CHECK3-NEXT: store i32* [[B4]], i32** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 3
// CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK3-NEXT: store i32* [[TMP11]], i32** [[TMP10]], align 8
// CHECK3-NEXT: call void @_ZZN2SSC1ERiENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(32) [[REF_TMP]])
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i32* [[A2]] to i8*
// CHECK3-NEXT: store i8* [[TMP13]], i8** [[TMP12]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i32* [[B4]] to i8*
// CHECK3-NEXT: store i8* [[TMP15]], i8** [[TMP14]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i32* [[C5]] to i8*
// CHECK3-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP19]], i32 3, i64 24, i8* [[TMP20]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP21]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[A2]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[B4]], align 4
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK3-NEXT: store i32 [[ADD7]], i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[C5]], align 4
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK3-NEXT: store i32 [[ADD8]], i32* [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[A2]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = atomicrmw add i32* [[TMP4]], i32 [[TMP28]] monotonic, align 4
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[B4]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP30]] monotonic, align 4
// CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[C5]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = atomicrmw add i32* [[TMP5]], i32 [[TMP32]] monotonic, align 4
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZZN2SSC1ERiENKUlvE_clEv
// CHECK3-SAME: (%class.anon.0* nonnull align 8 dereferenceable(32) [[THIS:%.*]]) #[[ATTR0]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
// CHECK3-NEXT: store %class.anon.0* [[THIS]], %class.anon.0** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[TMP0]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 1
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[TMP3]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 2
// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK3-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP7]], -1
// CHECK3-NEXT: store i32 [[DEC]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 3
// CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[TMP8]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 1
// CHECK3-NEXT: store i32 [[DIV]], i32* [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 1
// CHECK3-NEXT: [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 2
// CHECK3-NEXT: [[TMP14:%.*]] = load i32*, i32** [[TMP13]], align 8
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 3
// CHECK3-NEXT: [[TMP16:%.*]] = load i32*, i32** [[TMP15]], align 8
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i32*, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.SS* [[TMP1]], i32* [[TMP12]], i32* [[TMP14]], i32* [[TMP16]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK3-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK3-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK3-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[TMP23]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[A2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP3:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C5:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK3-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK3-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK3-NEXT: store i32* [[TMP3]], i32** [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK3-NEXT: store i32 -1, i32* [[A2]], align 4
// CHECK3-NEXT: store i32* [[A2]], i32** [[_TMP3]], align 8
// CHECK3-NEXT: store i32 -1, i32* [[B4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK3-NEXT: store i32 -1, i32* [[C5]], align 4
// CHECK3-NEXT: store i32* [[C5]], i32** [[_TMP6]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP3]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK3-NEXT: store i32 [[INC]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[B4]], align 4
// CHECK3-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP8]], -1
// CHECK3-NEXT: store i32 [[DEC]], i32* [[B4]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 1
// CHECK3-NEXT: store i32 [[DIV]], i32* [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast i32* [[A2]] to i8*
// CHECK3-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i32* [[B4]] to i8*
// CHECK3-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i32* [[C5]] to i8*
// CHECK3-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 3, i64 24, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.4, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[A2]], align 4
// CHECK3-NEXT: [[AND:%.*]] = and i32 [[TMP21]], [[TMP22]]
// CHECK3-NEXT: store i32 [[AND]], i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[B4]], align 4
// CHECK3-NEXT: [[AND7:%.*]] = and i32 [[TMP23]], [[TMP24]]
// CHECK3-NEXT: store i32 [[AND7]], i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[C5]], align 4
// CHECK3-NEXT: [[AND8:%.*]] = and i32 [[TMP25]], [[TMP26]]
// CHECK3-NEXT: store i32 [[AND8]], i32* [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[A2]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = atomicrmw and i32* [[TMP4]], i32 [[TMP27]] monotonic, align 4
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[B4]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = atomicrmw and i32* [[TMP2]], i32 [[TMP29]] monotonic, align 4
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[C5]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = atomicrmw and i32* [[TMP5]], i32 [[TMP31]] monotonic, align 4
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.4
// CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK3-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK3-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK3-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK3-NEXT: [[AND:%.*]] = and i32 [[TMP24]], [[TMP25]]
// CHECK3-NEXT: store i32 [[AND]], i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK3-NEXT: [[AND2:%.*]] = and i32 [[TMP26]], [[TMP27]]
// CHECK3-NEXT: store i32 [[AND2]], i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK3-NEXT: [[AND3:%.*]] = and i32 [[TMP28]], [[TMP29]]
// CHECK3-NEXT: store i32 [[AND3]], i32* [[TMP23]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[G_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: [[G1:%.*]] = alloca i32, align 128
// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT: store i32* [[G]], i32** [[G_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[G1]], align 128
// CHECK3-NEXT: store i32 1, i32* [[G1]], align 128
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT: store i32* [[G1]], i32** [[TMP1]], align 8
// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.1* nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i32* [[G1]] to i8*
// CHECK3-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 1, i64 8, i8* [[TMP6]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP7]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 128
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[G1]], align 128
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 128
// CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[G1]], align 128
// CHECK3-NEXT: [[TMP11:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP10]] monotonic, align 4
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
// CHECK3-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 128
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 128
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z14foo_array_sectPs
// CHECK4-SAME: (i16* [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK4-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i16*)* @.omp_outlined. to void (i32*, i32*, ...)*), i16* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i16* [[X:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[X_ADDR:%.*]] = alloca i16*, align 8
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i16*, align 8
// CHECK4-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK4-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[_TMP13:%.*]] = alloca i16, align 2
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store i16* [[X]], i16** [[X_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP0]], i64 0
// CHECK4-NEXT: [[TMP1:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK4-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[TMP1]], i64 0
// CHECK4-NEXT: [[TMP2:%.*]] = ptrtoint i16* [[ARRAYIDX1]] to i64
// CHECK4-NEXT: [[TMP3:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK4-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK4-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK4-NEXT: [[TMP6:%.*]] = add nuw i64 [[TMP5]], 1
// CHECK4-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK4-NEXT: [[TMP8:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP8]], i8** [[SAVED_STACK]], align 8
// CHECK4-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP6]], align 16
// CHECK4-NEXT: store i64 [[TMP6]], i64* [[__VLA_EXPR0]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP6]]
// CHECK4-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[VLA]], [[TMP9]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK4: omp.arrayinit.body:
// CHECK4-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK4-NEXT: store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK4-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK4: omp.arrayinit.done:
// CHECK4-NEXT: [[TMP10:%.*]] = load i16*, i16** [[X_ADDR]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = ptrtoint i16* [[TMP10]] to i64
// CHECK4-NEXT: [[TMP12:%.*]] = ptrtoint i16* [[ARRAYIDX]] to i64
// CHECK4-NEXT: [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
// CHECK4-NEXT: [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[VLA]], i64 [[TMP14]]
// CHECK4-NEXT: store i16* [[TMP15]], i16** [[TMP]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i16* [[VLA]] to i8*
// CHECK4-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK4-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP6]] to i8*
// CHECK4-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK4-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP21]], i32 1, i64 16, i8* [[TMP22]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: switch i32 [[TMP23]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK4-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK4-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.reduction.case1:
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK4-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP24]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE7:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK4: omp.arraycpy.body:
// CHECK4-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK4-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST2:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT5:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK4-NEXT: [[TMP25:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP25]] to i32
// CHECK4-NEXT: [[TMP26:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK4-NEXT: [[CONV3:%.*]] = sext i16 [[TMP26]] to i32
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV3]]
// CHECK4-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i16
// CHECK4-NEXT: store i16 [[CONV4]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], align 2
// CHECK4-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT5]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST2]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_DONE6:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT5]], [[TMP24]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_DONE7]], label [[OMP_ARRAYCPY_BODY]]
// CHECK4: omp.arraycpy.done7:
// CHECK4-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.case2:
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr i16, i16* [[ARRAYIDX]], i64 [[TMP6]]
// CHECK4-NEXT: [[OMP_ARRAYCPY_ISEMPTY8:%.*]] = icmp eq i16* [[ARRAYIDX]], [[TMP27]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY8]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY9:%.*]]
// CHECK4: omp.arraycpy.body9:
// CHECK4-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST10:%.*]] = phi i16* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK4-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST11:%.*]] = phi i16* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[ATOMIC_EXIT]] ]
// CHECK4-NEXT: [[TMP28:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK4-NEXT: [[CONV12:%.*]] = sext i16 [[TMP28]] to i32
// CHECK4-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]] monotonic, align 2
// CHECK4-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK4: atomic_cont:
// CHECK4-NEXT: [[TMP29:%.*]] = phi i16 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY9]] ], [ [[TMP34:%.*]], [[ATOMIC_CONT]] ]
// CHECK4-NEXT: store i16 [[TMP29]], i16* [[_TMP13]], align 2
// CHECK4-NEXT: [[TMP30:%.*]] = load i16, i16* [[_TMP13]], align 2
// CHECK4-NEXT: [[CONV14:%.*]] = sext i16 [[TMP30]] to i32
// CHECK4-NEXT: [[TMP31:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], align 2
// CHECK4-NEXT: [[CONV15:%.*]] = sext i16 [[TMP31]] to i32
// CHECK4-NEXT: [[ADD16:%.*]] = add nsw i32 [[CONV14]], [[CONV15]]
// CHECK4-NEXT: [[CONV17:%.*]] = trunc i32 [[ADD16]] to i16
// CHECK4-NEXT: store i16 [[CONV17]], i16* [[ATOMIC_TEMP]], align 2
// CHECK4-NEXT: [[TMP32:%.*]] = load i16, i16* [[ATOMIC_TEMP]], align 2
// CHECK4-NEXT: [[TMP33:%.*]] = cmpxchg i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i16 [[TMP29]], i16 [[TMP32]] monotonic monotonic, align 2
// CHECK4-NEXT: [[TMP34]] = extractvalue { i16, i1 } [[TMP33]], 0
// CHECK4-NEXT: [[TMP35:%.*]] = extractvalue { i16, i1 } [[TMP33]], 1
// CHECK4-NEXT: br i1 [[TMP35]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK4: atomic_exit:
// CHECK4-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST11]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST10]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP27]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY9]]
// CHECK4: omp.arraycpy.done21:
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.default:
// CHECK4-NEXT: [[TMP36:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP36]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i16*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK4-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16*
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK4-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr i16, i16* [[TMP11]], i64 [[TMP14]]
// CHECK4-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP11]], [[TMP15]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK4: omp.arraycpy.body:
// CHECK4-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK4-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK4-NEXT: [[TMP16:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP16]] to i32
// CHECK4-NEXT: [[TMP17:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
// CHECK4-NEXT: [[CONV2:%.*]] = sext i16 [[TMP17]] to i32
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
// CHECK4-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK4-NEXT: store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
// CHECK4-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK4-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]]
// CHECK4-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
// CHECK4: omp.arraycpy.done4:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SS:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
// CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT: call void @_ZN2SSC1ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[SS]], i32* nonnull align 4 dereferenceable(4) @sivar)
// CHECK4-NEXT: [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
// CHECK4-NEXT: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP1]](i8* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
// CHECK4-NEXT: ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2SSC1ERi
// CHECK4-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR8:[0-9]+]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK4-NEXT: call void @_ZN2SSC2ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[THIS1]], i32* nonnull align 4 dereferenceable(4) [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
// CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR8]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* @g)
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[G_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[G1:%.*]] = alloca i32, align 128
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, align 128
// CHECK4-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[G]], i32** [[G_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
// CHECK4-NEXT: store i32 0, i32* [[G1]], align 128
// CHECK4-NEXT: store i32 1, i32* [[G1]], align 128
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 128
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @g_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 16
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.2 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* [[G1]], align 128
// CHECK4-NEXT: store volatile i32 [[TMP1]], i32* [[BLOCK_CAPTURED]], align 128
// CHECK4-NEXT: [[TMP2:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP2]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP5:%.*]] = load i8*, i8** [[TMP3]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP6]](i8* [[TMP4]])
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i32* [[G1]] to i8*
// CHECK4-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK4-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 1, i64 8, i8* [[TMP11]], void (i8*, i8*)* @.omp.reduction.reduction_func.3, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: switch i32 [[TMP12]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK4-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK4-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.reduction.case1:
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP0]], align 128
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[G1]], align 128
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 128
// CHECK4-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.case2:
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[G1]], align 128
// CHECK4-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP15]] monotonic, align 4
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.default:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@g_block_invoke
// CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR8]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [96 x i8], i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 128
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.3
// CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK4-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 128
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 128
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2SSC2ERi
// CHECK4-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR8]] align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A2:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C5:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: store i32 0, i32* [[A]], align 8
// CHECK4-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK4-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
// CHECK4-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
// CHECK4-NEXT: store i8 [[BF_CLEAR]], i8* [[B]], align 4
// CHECK4-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP0]], i32** [[C]], align 8
// CHECK4-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: store i32* [[A3]], i32** [[A2]], align 8
// CHECK4-NEXT: [[C6:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[C6]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[C5]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A2]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C5]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]], i32* [[TMP2]], i32* [[B4]], i32* [[TMP3]])
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: [[B7:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
// CHECK4-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
// CHECK4-NEXT: [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
// CHECK4-NEXT: [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
// CHECK4-NEXT: [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
// CHECK4-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
// CHECK4-NEXT: store i8 [[BF_SET]], i8* [[B7]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP3:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C5:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, align 8
// CHECK4-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK4-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP3]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: store i32 0, i32* [[A2]], align 4
// CHECK4-NEXT: store i32* [[A2]], i32** [[_TMP3]], align 8
// CHECK4-NEXT: store i32 0, i32* [[B4]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: store i32 0, i32* [[C5]], align 4
// CHECK4-NEXT: store i32* [[C5]], i32** [[_TMP6]], align 8
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @g_block_invoke_2 to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.7 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED_THIS_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: store %struct.SS* [[TMP0]], %struct.SS** [[BLOCK_CAPTURED_THIS_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP3]], align 8
// CHECK4-NEXT: store i32* [[TMP6]], i32** [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED7:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 8
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[BLOCK_CAPTURED7]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED8:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP8:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK4-NEXT: store i32* [[TMP8]], i32** [[BLOCK_CAPTURED8]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP9]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP12:%.*]] = load i8*, i8** [[TMP10]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP13]](i8* [[TMP11]])
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK4-NEXT: [[TMP15:%.*]] = bitcast i32* [[A2]] to i8*
// CHECK4-NEXT: store i8* [[TMP15]], i8** [[TMP14]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i32* [[B4]] to i8*
// CHECK4-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i32* [[C5]] to i8*
// CHECK4-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK4-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]], i32 3, i64 24, i8* [[TMP22]], void (i8*, i8*)* @.omp.reduction.reduction_func.8, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: switch i32 [[TMP23]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK4-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK4-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.reduction.case1:
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[A2]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK4-NEXT: store i32 [[ADD9]], i32* [[TMP2]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[C5]], align 4
// CHECK4-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK4-NEXT: store i32 [[ADD10]], i32* [[TMP5]], align 4
// CHECK4-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.case2:
// CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[A2]], align 4
// CHECK4-NEXT: [[TMP31:%.*]] = atomicrmw add i32* [[TMP4]], i32 [[TMP30]] monotonic, align 4
// CHECK4-NEXT: [[TMP32:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: [[TMP33:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP32]] monotonic, align 4
// CHECK4-NEXT: [[TMP34:%.*]] = load i32, i32* [[C5]], align 4
// CHECK4-NEXT: [[TMP35:%.*]] = atomicrmw add i32* [[TMP5]], i32 [[TMP34]] monotonic, align 4
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.default:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@g_block_invoke_2
// CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR8]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED_THIS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[THIS:%.*]] = load %struct.SS*, %struct.SS** [[BLOCK_CAPTURED_THIS]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[BLOCK_CAPTURE_ADDR1]], align 8
// CHECK4-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP2]], -1
// CHECK4-NEXT: store i32 [[DEC]], i32* [[BLOCK_CAPTURE_ADDR1]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR2]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP4]], 1
// CHECK4-NEXT: store i32 [[DIV]], i32* [[TMP3]], align 4
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR3]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR4:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR5:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %struct.SS*, i32*, i32*, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP6:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR5]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i32*, i32*, i32*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.SS* [[THIS]], i32* [[TMP5]], i32* [[BLOCK_CAPTURE_ADDR4]], i32* [[TMP6]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[C:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[A2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP3:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[B4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C5:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP6:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [3 x i8*], align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK4-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK4-NEXT: store i32* [[C]], i32** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[C_ADDR]], align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32* [[TMP3]], i32** [[_TMP1]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK4-NEXT: store i32 0, i32* [[A2]], align 4
// CHECK4-NEXT: store i32* [[A2]], i32** [[_TMP3]], align 8
// CHECK4-NEXT: store i32 0, i32* [[B4]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[_TMP1]], align 8
// CHECK4-NEXT: store i32 0, i32* [[C5]], align 4
// CHECK4-NEXT: store i32* [[C5]], i32** [[_TMP6]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load i32*, i32** [[_TMP3]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK4-NEXT: store i32 [[INC]], i32* [[TMP6]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: [[DEC:%.*]] = add nsw i32 [[TMP8]], -1
// CHECK4-NEXT: store i32 [[DEC]], i32* [[B4]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32*, i32** [[_TMP6]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 1
// CHECK4-NEXT: store i32 [[DIV]], i32* [[TMP9]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK4-NEXT: [[TMP12:%.*]] = bitcast i32* [[A2]] to i8*
// CHECK4-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i32* [[B4]] to i8*
// CHECK4-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i32* [[C5]] to i8*
// CHECK4-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast [3 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK4-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 3, i64 24, i8* [[TMP19]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK4-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK4-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK4-NEXT: ]
// CHECK4: .omp.reduction.case1:
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[A2]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[TMP2]], align 4
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, i32* [[C5]], align 4
// CHECK4-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
// CHECK4-NEXT: store i32 [[ADD8]], i32* [[TMP5]], align 4
// CHECK4-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.case2:
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[A2]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = atomicrmw add i32* [[TMP4]], i32 [[TMP27]] monotonic, align 4
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[B4]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = atomicrmw add i32* [[TMP2]], i32 [[TMP29]] monotonic, align 4
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[C5]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = atomicrmw add i32* [[TMP5]], i32 [[TMP31]] monotonic, align 4
// CHECK4-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK4: .omp.reduction.default:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
// CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK4-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK4-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK4-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK4-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK4-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[TMP23]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.8
// CHECK4-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK4-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*
// CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK4-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK4-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK4-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2
// CHECK4-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2
// CHECK4-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP21]], align 8
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP26]], [[TMP27]]
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP20]], align 4
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[TMP23]], align 4
// CHECK4-NEXT: ret void
//
//