| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes |
| ; RUN: opt -S -passes='attributor' -aa-pipeline='basic-aa' -attributor-disable=false -attributor-max-iterations-verify -attributor-max-iterations=2 < %s | FileCheck %s |
; Test that we only promote arguments when the caller/callee have compatible
; function attributes.
| |
| target triple = "x86_64-unknown-linux-gnu" |
| |
| ; This should promote |
define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_PRIV:%.*]] = alloca <8 x i64>
; CHECK-NEXT: store <8 x i64> [[TMP0]], <8 x i64>* [[ARG1_PRIV]]
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1_PRIV]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. Caller and callee
  ; both use attribute group #0 (512-bit legal / 512-bit preferred), so the
  ; CHECK lines above expect %arg1 to be promoted to a by-value <8 x i64>.
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg) #0 {
; CHECK-LABEL: define {{[^@]+}}@avx512_legal512_prefer512_call_avx512_legal512_prefer512
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]], align 1
; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64> [[TMP0]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; Zero-initializes %tmp, passes it by pointer to the callee (which copies
  ; it into %tmp2), then forwards the result through %arg. The CHECK lines
  ; expect the second callee argument to be rewritten to a by-value
  ; <8 x i64> loaded at this call site.
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should promote |
define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_PRIV:%.*]] = alloca <8 x i64>
; CHECK-NEXT: store <8 x i64> [[TMP0]], <8 x i64>* [[ARG1_PRIV]]
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1_PRIV]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. Caller and callee
  ; both use attribute group #1 (512-bit legal / 256-bit preferred), so the
  ; CHECK lines above expect %arg1 to be promoted to a by-value <8 x i64>.
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg) #1 {
; CHECK-LABEL: define {{[^@]+}}@avx512_legal512_prefer256_call_avx512_legal512_prefer256
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]], align 1
; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64> [[TMP0]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; Zero-initializes %tmp, passes it by pointer to the callee (which copies
  ; it into %tmp2), then forwards the result through %arg. The CHECK lines
  ; expect the second callee argument to be rewritten to a by-value
  ; <8 x i64> loaded at this call site.
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should promote |
define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_PRIV:%.*]] = alloca <8 x i64>
; CHECK-NEXT: store <8 x i64> [[TMP0]], <8 x i64>* [[ARG1_PRIV]]
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1_PRIV]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. Callee is group #1
  ; and its caller is group #0 -- both have "min-legal-vector-width"="512",
  ; so promotion of %arg1 to a by-value <8 x i64> is still expected (only
  ; the preferred width differs).
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg) #0 {
; CHECK-LABEL: define {{[^@]+}}@avx512_legal512_prefer512_call_avx512_legal512_prefer256
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]], align 1
; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64> [[TMP0]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; Caller is group #0 (prefer 512) calling a group #1 callee (prefer 256);
  ; legal widths match (512), so the CHECK lines expect the second callee
  ; argument to be rewritten to a by-value <8 x i64> loaded here.
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should promote |
define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_PRIV:%.*]] = alloca <8 x i64>
; CHECK-NEXT: store <8 x i64> [[TMP0]], <8 x i64>* [[ARG1_PRIV]]
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1_PRIV]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. Callee is group #0
  ; and its caller is group #1 -- both have "min-legal-vector-width"="512",
  ; so promotion of %arg1 to a by-value <8 x i64> is still expected (only
  ; the preferred width differs).
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg) #1 {
; CHECK-LABEL: define {{[^@]+}}@avx512_legal512_prefer256_call_avx512_legal512_prefer512
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]], align 1
; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64> [[TMP0]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; Caller is group #1 (prefer 256) calling a group #0 callee (prefer 512);
  ; legal widths match (512), so the CHECK lines expect the second callee
  ; argument to be rewritten to a by-value <8 x i64> loaded here.
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should not promote |
define internal fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64>* noalias nocapture nofree nonnull readonly align 64 dereferenceable(64) [[ARG1:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. The caller below
  ; uses group #2 ("min-legal-vector-width"="256") while this callee is
  ; group #1 ("512"), so the CHECK lines expect %arg1 to stay a pointer
  ; (no by-value promotion).
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg) #2 {
; CHECK-LABEL: define {{[^@]+}}@avx512_legal256_prefer256_call_avx512_legal512_prefer256
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64>* noalias nocapture nofree nonnull readonly align 64 dereferenceable(64) [[TMP]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; Group #2 caller (legal 256) of a group #1 callee (legal 512): legal
  ; vector widths are incompatible, so the CHECK lines expect the call to
  ; keep passing %tmp by pointer (no vector load inserted here).
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should not promote |
define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #2 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64>* noalias nocapture nofree nonnull readonly align 64 dereferenceable(64) [[ARG1:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. This callee is
  ; group #2 ("min-legal-vector-width"="256") while its caller below is
  ; group #1 ("512"), so the CHECK lines expect %arg1 to stay a pointer
  ; (no by-value promotion).
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %arg) #1 {
; CHECK-LABEL: define {{[^@]+}}@avx512_legal512_prefer256_call_avx512_legal256_prefer256
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64>* noalias nocapture nofree nonnull readonly align 64 dereferenceable(64) [[TMP]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; Group #1 caller (legal 512) of a group #2 callee (legal 256): legal
  ; vector widths are incompatible, so the CHECK lines expect the call to
  ; keep passing %tmp by pointer (no vector load inserted here).
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should promote |
define internal fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #3 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_PRIV:%.*]] = alloca <8 x i64>
; CHECK-NEXT: store <8 x i64> [[TMP0]], <8 x i64>* [[ARG1_PRIV]]
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1_PRIV]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. AVX2 variant: the
  ; callee is group #3 and the caller group #4, with different
  ; "min-legal-vector-width" strings, yet the CHECK lines expect promotion
  ; of %arg1 to a by-value <8 x i64> here (target is only +avx2).
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg) #4 {
; CHECK-LABEL: define {{[^@]+}}@avx2_legal256_prefer256_call_avx2_legal512_prefer256
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]], align 1
; CHECK-NEXT: call fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64> [[TMP0]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; AVX2 caller (group #4) of a group #3 callee; the CHECK lines expect the
  ; second callee argument to be rewritten to a by-value <8 x i64> loaded
  ; at this call site.
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
| ; This should promote |
define internal fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #4 {
; CHECK-LABEL: define {{[^@]+}}@callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256
; CHECK-SAME: (<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_PRIV:%.*]] = alloca <8 x i64>
; CHECK-NEXT: store <8 x i64> [[TMP0]], <8 x i64>* [[ARG1_PRIV]]
; CHECK-NEXT: [[TMP:%.*]] = load <8 x i64>, <8 x i64>* [[ARG1_PRIV]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP]], <8 x i64>* [[ARG]], align 64
; CHECK-NEXT: ret void
;
bb:
  ; Copies the <8 x i64> pointed to by %arg1 into %arg. AVX2 variant: the
  ; callee is group #4 and the caller group #3; the CHECK lines expect
  ; promotion of %arg1 to a by-value <8 x i64> (target is only +avx2).
  %tmp = load <8 x i64>, <8 x i64>* %arg1
  store <8 x i64> %tmp, <8 x i64>* %arg
  ret void
}
| |
define void @avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg) #3 {
; CHECK-LABEL: define {{[^@]+}}@avx2_legal512_prefer256_call_avx2_legal256_prefer256
; CHECK-SAME: (<8 x i64>* nocapture writeonly [[ARG:%.*]])
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP2:%.*]] = alloca <8 x i64>, align 32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* nonnull writeonly align 32 dereferenceable(64) [[TMP3]], i8 0, i64 32, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]], align 1
; CHECK-NEXT: call fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* noalias nocapture nofree nonnull writeonly align 64 dereferenceable(64) [[TMP2]], <8 x i64> [[TMP0]])
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 64
; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2
; CHECK-NEXT: ret void
;
bb:
  ; AVX2 caller (group #3) of a group #4 callee; the CHECK lines expect the
  ; second callee argument to be rewritten to a by-value <8 x i64> loaded
  ; at this call site.
  %tmp = alloca <8 x i64>, align 32
  %tmp2 = alloca <8 x i64>, align 32
  %tmp3 = bitcast <8 x i64>* %tmp to i8*
  call void @llvm.memset.p0i8.i64(i8* align 32 %tmp3, i8 0, i64 32, i1 false)
  call fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %tmp2, <8 x i64>* %tmp)
  %tmp4 = load <8 x i64>, <8 x i64>* %tmp2, align 32
  store <8 x i64> %tmp4, <8 x i64>* %arg, align 2
  ret void
}
| |
; Function Attrs: argmemonly nounwind
; Standard memset intrinsic, used by the callers above to zero-initialize
; the 64-byte source alloca before the copy.
declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #5
| |
; Attribute groups pairing "min-legal-vector-width" / "prefer-vector-width"
; combinations. Callee/caller pairs above mix these groups to check which
; combinations allow argument promotion:
;   #0: avx512vl, legal 512, prefer 512
;   #1: avx512vl, legal 512, prefer 256
;   #2: avx512vl, legal 256, prefer 256
;   #3: avx2,     legal 512, prefer 256
;   #4: avx2,     legal 256, prefer 256
;   #5: memset intrinsic attributes
attributes #0 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="512" "prefer-vector-width"="512" }
attributes #1 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="512" "prefer-vector-width"="256" }
attributes #2 = { inlinehint norecurse nounwind uwtable "target-features"="+avx512vl" "min-legal-vector-width"="256" "prefer-vector-width"="256" }
attributes #3 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="512" "prefer-vector-width"="256" }
attributes #4 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="256" "prefer-vector-width"="256" }
attributes #5 = { argmemonly nounwind }