; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

; Test that memcpy-memcpy dependences are optimized across
; basic blocks (conditional branches and invokes); in each test below the
; second memcpy should be rewritten to read directly from the original source.

%struct.s = type { i32, i32 }

@s_foo = private unnamed_addr constant %struct.s { i32 1, i32 2 }, align 4
@s_baz = private unnamed_addr constant %struct.s { i32 1, i32 2 }, align 4
@i = external constant ptr

declare void @qux()
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare void @__cxa_throw(ptr, ptr, ptr)
declare i32 @__gxx_personality_v0(...)
declare ptr @__cxa_begin_catch(ptr)

; A simple partial redundancy. Test that the second memcpy is optimized
; to copy directly from the original source rather than from the temporary.
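;
; A rough C++ analogue of this pattern (hypothetical, for illustration only;
; it is not the source this test was reduced from, and qux() merely stands in
; for the declared callee that does not return to the caller):
;
;   #include <cstring>
;
;   extern void qux();                // assumed not to return
;
;   void wobble(char *dst, const char *src, bool some_condition) {
;     char temp[64];
;     std::memcpy(temp, src, 64);     // first copy, into the temporary
;     if (some_condition)
;       std::memcpy(dst, temp, 64);   // forwardable: may read src directly
;     else
;       qux();
;   }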

define void @wobble(ptr noalias %dst, ptr %src, i1 %some_condition) {
; CHECK-LABEL: @wobble(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TEMP:%.*]] = alloca i8, i32 64, align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TEMP]], ptr nonnull align 8 [[SRC:%.*]], i64 64, i1 false)
; CHECK-NEXT: br i1 [[SOME_CONDITION:%.*]], label [[MORE:%.*]], label [[OUT:%.*]]
; CHECK: out:
; CHECK-NEXT: call void @qux()
; CHECK-NEXT: unreachable
; CHECK: more:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST:%.*]], ptr align 8 [[SRC]], i64 64, i1 false)
; CHECK-NEXT: ret void
;
bb:
%temp = alloca i8, i32 64
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %temp, ptr nonnull align 8 %src, i64 64, i1 false)
br i1 %some_condition, label %more, label %out

out:
call void @qux()
unreachable

more:
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %temp, i64 64, i1 false)
ret void
}

; A CFG triangle with a partial redundancy targeting an alloca. Test that the
; memcpy inside the triangle is optimized to copy directly from the original
; source rather than from the temporary.
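;
; A rough C++ analogue (hypothetical, for illustration only; as in the IR,
; t is left untouched on the fall-through path of this reduced test):
;
;   struct S { int a, b; };
;   extern const S s_foo;             // stands in for the constant global above
;
;   int foo(bool t3) {
;     S s = s_foo;                    // first copy, from the constant
;     S t;
;     if (t3)
;       t = s;                        // second copy: may read s_foo directly
;     return t.a + t.b;
;   }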

define i32 @foo(i1 %t3) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 4
; CHECK-NEXT: [[T:%.*]] = alloca [[STRUCT_S]], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S]], ptr align 4 @s_foo, i64 8, i1 false)
; CHECK-NEXT: br i1 [[T3:%.*]], label [[BB4:%.*]], label [[BB7:%.*]]
; CHECK: bb4:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[T]], ptr align 4 @s_foo, i64 8, i1 false)
; CHECK-NEXT: br label [[BB7]]
; CHECK: bb7:
; CHECK-NEXT: [[T9:%.*]] = load i32, ptr [[T]], align 4
; CHECK-NEXT: [[T10:%.*]] = getelementptr [[STRUCT_S]], ptr [[T]], i32 0, i32 1
; CHECK-NEXT: [[T11:%.*]] = load i32, ptr [[T10]], align 4
; CHECK-NEXT: [[T12:%.*]] = add i32 [[T9]], [[T11]]
; CHECK-NEXT: ret i32 [[T12]]
;
bb:
%s = alloca %struct.s, align 4
%t = alloca %struct.s, align 4
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %s, ptr align 4 @s_foo, i64 8, i1 false)
br i1 %t3, label %bb4, label %bb7

bb4: ; preds = %bb
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %t, ptr align 4 %s, i64 8, i1 false)
br label %bb7

bb7: ; preds = %bb4, %bb
%t9 = load i32, ptr %t, align 4
%t10 = getelementptr %struct.s, ptr %t, i32 0, i32 1
%t11 = load i32, ptr %t10, align 4
%t12 = add i32 %t9, %t11
ret i32 %t12
}

; A CFG diamond with an invoke on one side, and a partially redundant memcpy
; into an alloca on the other. Test that the memcpy inside the diamond is
; optimized to copy directly from the original source rather than from the
; temporary. This more complex test represents a relatively common usage
; pattern.
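;
; A rough C++ analogue of that usage pattern (hypothetical, for illustration
; only; the catch-all models the landingpad/__cxa_begin_catch path, and t is
; left untouched on the exception path, as in the IR):
;
;   struct S { int a, b; };
;   extern const S s_baz;             // stands in for the constant global above
;
;   int baz(bool t5) {
;     S s = s_baz;                    // first copy, from the constant
;     S t;
;     if (t5) {
;       try { throw 0; } catch (...) {}
;     } else {
;       t = s;                        // second copy: may read s_baz directly
;     }
;     return t.a + t.b;
;   }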

define i32 @baz(i1 %t5) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @baz(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 4
; CHECK-NEXT: [[T:%.*]] = alloca [[STRUCT_S]], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S]], ptr align 4 @s_baz, i64 8, i1 false)
; CHECK-NEXT: br i1 [[T5:%.*]], label [[BB6:%.*]], label [[BB22:%.*]]
; CHECK: bb6:
; CHECK-NEXT: invoke void @__cxa_throw(ptr null, ptr @i, ptr null)
; CHECK-NEXT:    to label [[BB25:%.*]] unwind label [[BB9:%.*]]
; CHECK: bb9:
; CHECK-NEXT: [[T10:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT:    catch ptr null
; CHECK-NEXT: br label [[BB13:%.*]]
; CHECK: bb13:
; CHECK-NEXT: [[T15:%.*]] = call ptr @__cxa_begin_catch(ptr null)
; CHECK-NEXT: br label [[BB23:%.*]]
; CHECK: bb22:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[T]], ptr align 4 @s_baz, i64 8, i1 false)
; CHECK-NEXT: br label [[BB23]]
; CHECK: bb23:
; CHECK-NEXT: [[T18:%.*]] = load i32, ptr [[T]], align 4
; CHECK-NEXT: [[T19:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[T]], i32 0, i32 1
; CHECK-NEXT: [[T20:%.*]] = load i32, ptr [[T19]], align 4
; CHECK-NEXT: [[T21:%.*]] = add nsw i32 [[T18]], [[T20]]
; CHECK-NEXT: ret i32 [[T21]]
; CHECK: bb25:
; CHECK-NEXT: unreachable
;
bb:
%s = alloca %struct.s, align 4
%t = alloca %struct.s, align 4
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %s, ptr align 4 @s_baz, i64 8, i1 false)
br i1 %t5, label %bb6, label %bb22

bb6: ; preds = %bb
invoke void @__cxa_throw(ptr null, ptr @i, ptr null)
        to label %bb25 unwind label %bb9

bb9: ; preds = %bb6
%t10 = landingpad { ptr, i32 }
        catch ptr null
br label %bb13

bb13: ; preds = %bb9
%t15 = call ptr @__cxa_begin_catch(ptr null)
br label %bb23

bb22: ; preds = %bb
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %t, ptr align 4 %s, i64 8, i1 false)
br label %bb23

bb23: ; preds = %bb22, %bb13
%t18 = load i32, ptr %t, align 4
%t19 = getelementptr inbounds %struct.s, ptr %t, i32 0, i32 1
%t20 = load i32, ptr %t19, align 4
%t21 = add nsw i32 %t18, %t20
ret i32 %t21

bb25: ; preds = %bb6
unreachable
}

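; A partially redundant memcpy where an unrelated store (through %arg) clobbers
; memory on one path between the two copies. Test that the second memcpy is
; still optimized to copy directly from %b rather than from %a.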
define void @memphi_with_unrelated_clobber(i1 %cond, ptr %arg, ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; CHECK-LABEL: @memphi_with_unrelated_clobber(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A:%.*]], ptr [[B:%.*]], i64 16, i1 false)
; CHECK-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
; CHECK-NEXT: store i64 0, ptr [[ARG:%.*]], align 4
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[C:%.*]], ptr [[B]], i64 16, i1 false)
; CHECK-NEXT: ret void
;
entry:
call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %b, i64 16, i1 false)
br i1 %cond, label %then, label %exit

then:
store i64 0, ptr %arg
br label %exit

exit:
call void @llvm.memcpy.p0.p0.i64(ptr %c, ptr %a, i64 16, i1 false)
ret void
}