| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt < %s -aa-pipeline=basic-aa -passes='dse,verify<memoryssa>' -S | FileCheck %s |
| target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" |
| |
| declare void @memset_pattern16(ptr, ptr, i64) |
| |
| declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind |
| declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture, i8, i64, i32) nounwind |
| declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind |
| declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i32) nounwind |
| declare void @llvm.init.trampoline(ptr, ptr, ptr) |
| |
| ; **** Noop load->store tests ************************************************** |
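| ; A "noop" store writes back the value just loaded from the same address, so |
| ; removing it cannot change memory state. Illustrative sketch (not itself one |
| ; of the tests below): |
| ;   %a = load i32, ptr %Q |
| ;   store i32 %a, ptr %Q   ; noop, removable |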
| |
| ; We CAN remove the noop store even though its value comes from a volatile |
| ; load; only the volatile load itself must stay. |
| define void @test_load_volatile(ptr %Q) { |
| ; CHECK-LABEL: @test_load_volatile( |
| ; CHECK-NEXT: [[A:%.*]] = load volatile i32, ptr [[Q:%.*]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %a = load volatile i32, ptr %Q |
| store i32 %a, ptr %Q |
| ret void |
| } |
| |
| ; We can NOT optimize volatile stores. |
| define void @test_store_volatile(ptr %Q) { |
| ; CHECK-LABEL: @test_store_volatile( |
| ; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4 |
| ; CHECK-NEXT: store volatile i32 [[A]], ptr [[Q]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %a = load i32, ptr %Q |
| store volatile i32 %a, ptr %Q |
| ret void |
| } |
| |
| ; PR2599 - load -> store to same address. |
| define void @test12(ptr %x) nounwind { |
| ; CHECK-LABEL: @test12( |
| ; CHECK-NEXT: [[TEMP7:%.*]] = getelementptr { i32, i32 }, ptr [[X:%.*]], i32 0, i32 1 |
| ; CHECK-NEXT: [[TEMP8:%.*]] = load i32, ptr [[TEMP7]], align 4 |
| ; CHECK-NEXT: [[TEMP17:%.*]] = sub i32 0, [[TEMP8]] |
| ; CHECK-NEXT: store i32 [[TEMP17]], ptr [[TEMP7]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %temp4 = getelementptr { i32, i32 }, ptr %x, i32 0, i32 0 |
| %temp5 = load i32, ptr %temp4, align 4 |
| %temp7 = getelementptr { i32, i32 }, ptr %x, i32 0, i32 1 |
| %temp8 = load i32, ptr %temp7, align 4 |
| %temp17 = sub i32 0, %temp8 |
| store i32 %temp5, ptr %temp4, align 4 |
| store i32 %temp17, ptr %temp7, align 4 |
| ret void |
| } |
| |
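| ; In the next tests the load and the write-back of its value sit in different |
| ; blocks. The store is still a noop when nothing on any path from the load to |
| ; the store may clobber the location. |
| |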
| ; Remove the redundant store even when the load is in an earlier block. |
| define i32 @test26(i1 %c, ptr %p) { |
| ; CHECK-LABEL: @test26( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: br label [[BB3:%.*]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: br label [[BB3]] |
| ; CHECK: bb3: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| %v = load i32, ptr %p, align 4 |
| br i1 %c, label %bb1, label %bb2 |
| bb1: |
| br label %bb3 |
| bb2: |
| store i32 %v, ptr %p, align 4 |
| br label %bb3 |
| bb3: |
| ret i32 0 |
| } |
| |
| ; Remove the redundant store even when the load is in an earlier block. |
| define i32 @test27(i1 %c, ptr %p) { |
| ; CHECK-LABEL: @test27( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: br label [[BB3:%.*]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: br label [[BB3]] |
| ; CHECK: bb3: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| %v = load i32, ptr %p, align 4 |
| br i1 %c, label %bb1, label %bb2 |
| bb1: |
| br label %bb3 |
| bb2: |
| br label %bb3 |
| bb3: |
| store i32 %v, ptr %p, align 4 |
| ret i32 0 |
| } |
| |
| ; Remove the redundant store even when it sits in a loop and the load is in a |
| ; preceding block. |
| define i32 @test31(i1 %c, ptr %p, i32 %i) { |
| ; CHECK-LABEL: @test31( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br label [[BB1:%.*]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| %v = load i32, ptr %p, align 4 |
| br label %bb1 |
| bb1: |
| store i32 %v, ptr %p, align 4 |
| br i1 %c, label %bb1, label %bb2 |
| bb2: |
| ret i32 0 |
| } |
| |
| ; Don't remove the "redundant" store in bb1: the store to %p2 may alias %p and |
| ; change its value on the way back to bb1. |
| define i32 @test46(i1 %c, ptr %p, ptr %p2, i32 %i) { |
| ; CHECK-LABEL: @test46( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4 |
| ; CHECK-NEXT: br label [[BB1:%.*]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4 |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: store i32 0, ptr [[P2:%.*]], align 4 |
| ; CHECK-NEXT: br i1 [[C]], label [[BB3:%.*]], label [[BB1]] |
| ; CHECK: bb3: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| %v = load i32, ptr %p, align 4 |
| br label %bb1 |
| bb1: |
| store i32 %v, ptr %p, align 4 |
| br i1 %c, label %bb1, label %bb2 |
| bb2: |
| store i32 0, ptr %p2, align 4 |
| br i1 %c, label %bb3, label %bb1 |
| bb3: |
| ret i32 0 |
| } |
| |
| declare void @unknown_func() |
| |
| ; Remove the redundant store, which is in the same loop as the load. |
| define i32 @test33(i1 %c, ptr %p, i32 %i) { |
| ; CHECK-LABEL: @test33( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br label [[BB1:%.*]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: br label [[BB2:%.*]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: call void @unknown_func() |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB3:%.*]] |
| ; CHECK: bb3: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| br label %bb1 |
| bb1: |
| %v = load i32, ptr %p, align 4 |
| br label %bb2 |
| bb2: |
| store i32 %v, ptr %p, align 4 |
| ; @unknown_func might read and overwrite the value at %p, but it runs after |
| ; the store, so the store still only rewrites the value loaded in bb1. |
| call void @unknown_func() |
| br i1 %c, label %bb1, label %bb3 |
| bb3: |
| ret i32 0 |
| } |
| |
| declare void @unknown_write(ptr) |
| |
| ; We can't remove the "noop" store around an unknown write. |
| define void @test43(ptr %Q) { |
| ; CHECK-LABEL: @test43( |
| ; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[Q:%.*]], align 4 |
| ; CHECK-NEXT: call void @unknown_write(ptr [[Q]]) |
| ; CHECK-NEXT: store i32 [[A]], ptr [[Q]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %a = load i32, ptr %Q |
| call void @unknown_write(ptr %Q) |
| store i32 %a, ptr %Q |
| ret void |
| } |
| |
| ; We CAN remove it when the unknown write comes AFTER the store. |
| define void @test44(ptr %Q) { |
| ; CHECK-LABEL: @test44( |
| ; CHECK-NEXT: call void @unknown_write(ptr [[Q:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %a = load i32, ptr %Q |
| store i32 %a, ptr %Q |
| call void @unknown_write(ptr %Q) |
| ret void |
| } |
| |
| define void @test45(ptr %Q) { |
| ; CHECK-LABEL: @test45( |
| ; CHECK-NEXT: ret void |
| ; |
| %a = load i32, ptr %Q |
| store i32 10, ptr %Q |
| store i32 %a, ptr %Q |
| ret void |
| } |
| |
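| ; The store of %v in bb1 must stay: on the path through bb0, %p holds 0 rather |
| ; than %v, so the store is not a noop there. |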
| define i32 @test48(i1 %c, ptr %p) { |
| ; CHECK-LABEL: @test48( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4 |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB0:%.*]], label [[BB0_0:%.*]] |
| ; CHECK: bb0: |
| ; CHECK-NEXT: store i32 0, ptr [[P]], align 4 |
| ; CHECK-NEXT: br i1 [[C]], label [[BB1:%.*]], label [[BB2:%.*]] |
| ; CHECK: bb0.0: |
| ; CHECK-NEXT: br label [[BB1]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4 |
| ; CHECK-NEXT: br i1 [[C]], label [[BB2]], label [[BB0]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| %v = load i32, ptr %p, align 4 |
| br i1 %c, label %bb0, label %bb0.0 |
| |
| bb0: |
| store i32 0, ptr %p |
| br i1 %c, label %bb1, label %bb2 |
| |
| bb0.0: |
| br label %bb1 |
| |
| bb1: |
| store i32 %v, ptr %p, align 4 |
| br i1 %c, label %bb2, label %bb0 |
| bb2: |
| ret i32 0 |
| } |
| |
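| ; Both stores only write back the unclobbered loaded value, so both of them |
| ; can be removed. |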
| define i32 @test47(i1 %c, ptr %p, i32 %i) { |
| ; CHECK-LABEL: @test47( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br label [[BB1:%.*]] |
| ; CHECK: bb1: |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1]], label [[BB2:%.*]] |
| ; CHECK: bb2: |
| ; CHECK-NEXT: br i1 [[C]], label [[BB3:%.*]], label [[BB1]] |
| ; CHECK: bb3: |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| entry: |
| %v = load i32, ptr %p, align 4 |
| br label %bb1 |
| bb1: |
| store i32 %v, ptr %p, align 4 |
| br i1 %c, label %bb1, label %bb2 |
| bb2: |
| store i32 %v, ptr %p, align 4 |
| br i1 %c, label %bb3, label %bb1 |
| bb3: |
| ret i32 0 |
| } |
| |
| ; Test case from PR47887. |
| define void @test_noalias_store_between_load_and_store(ptr noalias %x, ptr noalias %y) { |
| ; CHECK-LABEL: @test_noalias_store_between_load_and_store( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %lv = load i32, ptr %x, align 4 |
| store i32 0, ptr %y, align 4 |
| store i32 %lv, ptr %x, align 4 |
| ret void |
| } |
| |
| ; Test case from PR47887. Both the dead `store i32 %inc, ptr %x` and the |
| ; no-op `store i32 %lv, ptr %x` are eliminated. No-op stores are eliminated |
| ; before dead stores for the same def. |
| define void @test_noalias_store_between_load_and_store_elimin_order(ptr noalias %x, ptr noalias %y) { |
| ; CHECK-LABEL: @test_noalias_store_between_load_and_store_elimin_order( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: store i32 0, ptr [[Y:%.*]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %lv = load i32, ptr %x, align 4 |
| %inc = add nsw i32 %lv, 1 |
| store i32 %inc, ptr %x, align 4 |
| store i32 0, ptr %y, align 4 |
| store i32 %lv, ptr %x, align 4 |
| ret void |
| } |
| |
| declare noalias ptr @malloc(i64) |
| declare noalias ptr @_Znwm(i64) |
| declare void @clobber_memory(ptr) |
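| |
| ; The tests below exercise folding a malloc followed by a zero memset of the |
| ; whole allocation into a single calloc call. Illustrative sketch (not itself |
| ; a test; assumes nothing clobbers the memory between the two calls): |
| ;   %p = call ptr @malloc(i64 %n) inaccessiblememonly |
| ;   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false) |
| ; is folded to: |
| ;   %p = call ptr @calloc(i64 1, i64 %n) |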
| |
| ; based on pr25892_lite |
| define ptr @zero_memset_after_malloc(i64 %size) { |
| ; CHECK-LABEL: @zero_memset_after_malloc( |
| ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]]) |
| ; CHECK-NEXT: ret ptr [[CALLOC]] |
| ; |
| %call = call ptr @malloc(i64 %size) inaccessiblememonly |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false) |
| ret ptr %call |
| } |
| |
| ; based on pr25892_lite |
| define ptr @zero_memset_after_malloc_with_intermediate_clobbering(i64 %size) { |
| ; CHECK-LABEL: @zero_memset_after_malloc_with_intermediate_clobbering( |
| ; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 [[SIZE:%.*]]) #[[ATTR7:[0-9]+]] |
| ; CHECK-NEXT: call void @clobber_memory(ptr [[CALL]]) |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE]], i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = call ptr @malloc(i64 %size) inaccessiblememonly |
| call void @clobber_memory(ptr %call) |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false) |
| ret ptr %call |
| } |
| |
| ; based on pr25892_lite |
| define ptr @zero_memset_after_malloc_with_different_sizes(i64 %size) { |
| ; CHECK-LABEL: @zero_memset_after_malloc_with_different_sizes( |
| ; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 [[SIZE:%.*]]) #[[ATTR7]] |
| ; CHECK-NEXT: [[SIZE2:%.*]] = add nsw i64 [[SIZE]], -1 |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE2]], i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = call ptr @malloc(i64 %size) inaccessiblememonly |
| %size2 = add nsw i64 %size, -1 |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size2, i1 false) |
| ret ptr %call |
| } |
| |
| ; based on pr25892_lite |
| define ptr @zero_memset_after_new(i64 %size) { |
| ; CHECK-LABEL: @zero_memset_after_new( |
| ; CHECK-NEXT: [[CALL:%.*]] = call ptr @_Znwm(i64 [[SIZE:%.*]]) |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[SIZE]], i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = call ptr @_Znwm(i64 %size) |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false) |
| ret ptr %call |
| } |
| |
| ; This should not create a calloc and should not crash the compiler. |
| define ptr @notmalloc_memset(i64 %size, ptr %notmalloc) { |
| ; CHECK-LABEL: @notmalloc_memset( |
| ; CHECK-NEXT: [[CALL1:%.*]] = call ptr [[NOTMALLOC:%.*]](i64 [[SIZE:%.*]]) |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL1]], i8 0, i64 [[SIZE]], i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL1]] |
| ; |
| %call1 = call ptr %notmalloc(i64 %size) |
| call void @llvm.memset.p0.i64(ptr %call1, i8 0, i64 %size, i1 false) |
| ret ptr %call1 |
| } |
| |
| ; This should not create a recursive call to calloc. |
| define ptr @calloc(i64 %nmemb, i64 %size) inaccessiblememonly { |
| ; CHECK-LABEL: @calloc( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[SIZE:%.*]], [[NMEMB:%.*]] |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias align 16 ptr @malloc(i64 [[MUL]]) |
| ; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq ptr [[CALL]], null |
| ; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] |
| ; CHECK: if.then: |
| ; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr nonnull align 16 [[CALL]], i8 0, i64 [[MUL]], i1 false) |
| ; CHECK-NEXT: br label [[IF_END]] |
| ; CHECK: if.end: |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| entry: |
| %mul = mul i64 %size, %nmemb |
| %call = tail call noalias align 16 ptr @malloc(i64 %mul) |
| %tobool.not = icmp eq ptr %call, null |
| br i1 %tobool.not, label %if.end, label %if.then |
| |
| if.then: ; preds = %entry |
| tail call void @llvm.memset.p0.i64(ptr nonnull align 16 %call, i8 0, i64 %mul, i1 false) |
| br label %if.end |
| |
| if.end: ; preds = %if.then, %entry |
| ret ptr %call |
| } |
| |
| define ptr @pr25892(i64 %size) { |
| ; CHECK-LABEL: @pr25892( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]]) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALLOC]], null |
| ; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]] |
| ; CHECK: if.end: |
| ; CHECK-NEXT: br label [[CLEANUP]] |
| ; CHECK: cleanup: |
| ; CHECK-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[CALLOC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: ret ptr [[RETVAL_0]] |
| ; |
| entry: |
| %call = call ptr @malloc(i64 %size) inaccessiblememonly |
| %cmp = icmp eq ptr %call, null |
| br i1 %cmp, label %cleanup, label %if.end |
| if.end: |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false) |
| br label %cleanup |
| cleanup: |
| %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ] |
| ret ptr %retval.0 |
| } |
| |
| define ptr @pr25892_with_extra_store(i64 %size) { |
| ; CHECK-LABEL: @pr25892_with_extra_store( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[SIZE:%.*]]) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALLOC]], null |
| ; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]] |
| ; CHECK: if.end: |
| ; CHECK-NEXT: br label [[CLEANUP]] |
| ; CHECK: cleanup: |
| ; CHECK-NEXT: [[RETVAL_0:%.*]] = phi ptr [ [[CALLOC]], [[IF_END]] ], [ null, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: ret ptr [[RETVAL_0]] |
| ; |
| entry: |
| %call = call ptr @malloc(i64 %size) inaccessiblememonly |
| %cmp = icmp eq ptr %call, null |
| br i1 %cmp, label %cleanup, label %if.end |
| if.end: |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %size, i1 false) |
| store i8 0, ptr %call, align 1 |
| br label %cleanup |
| cleanup: |
| %retval.0 = phi ptr [ %call, %if.end ], [ null, %entry ] |
| ret ptr %retval.0 |
| } |
| |
| ; This should not create a calloc: the memset is not guarded by a null check |
| ; of the malloc result. |
| define ptr @malloc_with_no_nointer_null_check(i64 %0, i32 %1) { |
| ; CHECK-LABEL: @malloc_with_no_nointer_null_check( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALL:%.*]] = call ptr @malloc(i64 [[TMP0:%.*]]) #[[ATTR7]] |
| ; CHECK-NEXT: [[A:%.*]] = and i32 [[TMP1:%.*]], 32 |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0 |
| ; CHECK-NEXT: br i1 [[CMP]], label [[CLEANUP:%.*]], label [[IF_END:%.*]] |
| ; CHECK: if.end: |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 [[TMP0]], i1 false) |
| ; CHECK-NEXT: br label [[CLEANUP]] |
| ; CHECK: cleanup: |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| entry: |
| %call = call ptr @malloc(i64 %0) inaccessiblememonly |
| %a = and i32 %1, 32 |
| %cmp = icmp eq i32 %a, 0 |
| br i1 %cmp, label %cleanup, label %if.end |
| if.end: |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 %0, i1 false) |
| br label %cleanup |
| cleanup: |
| ret ptr %call |
| } |
| |
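| ; calloc returns zero-initialized memory, so a zero store or zero memset into |
| ; freshly calloc'ed memory that nothing has written in between is dead. The |
| ; tests below cover full and partial zero memsets and intervening stores. |
| |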
| ; PR50143 |
| define ptr @store_zero_after_calloc_inaccessiblememonly() { |
| ; CHECK-LABEL: @store_zero_after_calloc_inaccessiblememonly( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 1, i64 10) #[[ATTR7]] |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 1, i64 10) inaccessiblememonly |
| store i8 0, ptr %call |
| ret ptr %call |
| } |
| |
| define ptr @zero_memset_after_calloc() { |
| ; CHECK-LABEL: @zero_memset_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @volatile_zero_memset_after_calloc() { |
| ; CHECK-LABEL: @volatile_zero_memset_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 40000, i1 true) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 true) |
| ret ptr %call |
| } |
| |
| define ptr @zero_memset_and_store_after_calloc(i8 %v) { |
| ; CHECK-LABEL: @zero_memset_and_store_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| store i8 %v, ptr %call |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @partial_zero_memset_after_calloc() { |
| ; CHECK-LABEL: @partial_zero_memset_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @partial_zero_memset_and_store_after_calloc(i8 %v) { |
| ; CHECK-LABEL: @partial_zero_memset_and_store_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 30 |
| ; CHECK-NEXT: store i8 [[V:%.*]], ptr [[GEP]], align 1 |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| %gep = getelementptr inbounds i8, ptr %call, i64 30 |
| store i8 %v, ptr %gep |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx) { |
| ; CHECK-LABEL: @zero_memset_and_store_with_dyn_index_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| %gep = getelementptr inbounds i8, ptr %call, i64 %idx |
| store i8 %v, ptr %gep |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @partial_zero_memset_and_store_with_dyn_index_after_calloc(i8 %v, i64 %idx) { |
| ; CHECK-LABEL: @partial_zero_memset_and_store_with_dyn_index_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 [[IDX:%.*]] |
| ; CHECK-NEXT: store i8 [[V:%.*]], ptr [[GEP]], align 1 |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 0, i64 20, i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| %gep = getelementptr inbounds i8, ptr %call, i64 %idx |
| store i8 %v, ptr %gep |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 20, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @zero_memset_after_calloc_inaccessiblememonly() { |
| ; CHECK-LABEL: @zero_memset_after_calloc_inaccessiblememonly( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) #[[ATTR7]] |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) inaccessiblememonly |
| call void @llvm.memset.p0.i64(ptr %call, i8 0, i64 40000, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @cst_nonzero_memset_after_calloc() { |
| ; CHECK-LABEL: @cst_nonzero_memset_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 1, i64 40000, i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| call void @llvm.memset.p0.i64(ptr %call, i8 1, i64 40000, i1 false) |
| ret ptr %call |
| } |
| |
| define ptr @nonzero_memset_after_calloc(i8 %v) { |
| ; CHECK-LABEL: @nonzero_memset_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[CALL]], i8 [[V:%.*]], i64 40000, i1 false) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) |
| call void @llvm.memset.p0.i64(ptr %call, i8 %v, i64 40000, i1 false) |
| ret ptr %call |
| } |
| |
| ; PR11896 |
| ; The first memset is dead, because calloc provides zero-filled memory. |
| ; TODO: This could be replaced with a call to malloc + memset_pattern16. |
| define ptr @memset_pattern16_after_calloc(ptr %pat) { |
| ; CHECK-LABEL: @memset_pattern16_after_calloc( |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @calloc(i64 10000, i64 4) |
| ; CHECK-NEXT: call void @memset_pattern16(ptr [[CALL]], ptr [[PAT:%.*]], i64 40000) |
| ; CHECK-NEXT: ret ptr [[CALL]] |
| ; |
| %call = tail call ptr @calloc(i64 10000, i64 4) #1 |
| call void @llvm.memset.p0.i64(ptr align 4 %call, i8 0, i64 40000, i1 false) |
| call void @memset_pattern16(ptr %call, ptr %pat, i64 40000) #1 |
| ret ptr %call |
| } |
| |
| @n = global i32 0, align 4 |
| @a = external global i32, align 4 |
| @b = external global ptr, align 8 |
| |
| ; GCC calloc-1.c test case: this should create a calloc. |
| define ptr @test_malloc_memset_to_calloc(ptr %0) { |
| ; CHECK-LABEL: @test_malloc_memset_to_calloc( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @n, align 4 |
| ; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP1]] to i64 |
| ; CHECK-NEXT: [[CALLOC:%.*]] = call ptr @calloc(i64 1, i64 [[TMP2]]) |
| ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP0:%.*]], align 8 |
| ; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP3]], 1 |
| ; CHECK-NEXT: store i64 [[TMP4]], ptr [[TMP0]], align 8 |
| ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq ptr [[CALLOC]], null |
| ; CHECK-NEXT: br i1 [[TMP5]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] |
| ; CHECK: if.then: |
| ; CHECK-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP3]], 2 |
| ; CHECK-NEXT: store i64 [[TMP6]], ptr [[TMP0]], align 8 |
| ; CHECK-NEXT: store i32 2, ptr @a, align 4 |
| ; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr @b, align 8 |
| ; CHECK-NEXT: store i32 3, ptr [[TMP7]], align 4 |
| ; CHECK-NEXT: br label [[IF_END]] |
| ; CHECK: if.end: |
| ; CHECK-NEXT: ret ptr [[CALLOC]] |
| ; |
| entry: |
| %1 = load i32, ptr @n, align 4 |
| %2 = sext i32 %1 to i64 |
| %3 = tail call ptr @malloc(i64 %2) inaccessiblememonly |
| %4 = load i64, ptr %0, align 8 |
| %5 = add nsw i64 %4, 1 |
| store i64 %5, ptr %0, align 8 |
| %6 = icmp eq ptr %3, null |
| br i1 %6, label %if.end, label %if.then |
| |
| if.then: |
| %7 = add nsw i64 %4, 2 |
| store i64 %7, ptr %0, align 8 |
| store i32 2, ptr @a, align 4 |
| tail call void @llvm.memset.p0.i64(ptr align 4 %3, i8 0, i64 %2, i1 false) |
| %8 = load ptr, ptr @b, align 8 |
| store i32 3, ptr %8, align 4 |
| br label %if.end |
| |
| if.end: |
| ret ptr %3 |
| } |
| |
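| ; No calloc is formed when the malloc is declared memory(none); the memset is |
| ; left in place. |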
| define ptr @readnone_malloc() { |
| ; CHECK-LABEL: @readnone_malloc( |
| ; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16) #[[ATTR8:[0-9]+]] |
| ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[ALLOC]], i8 0, i64 16, i1 false) |
| ; CHECK-NEXT: ret ptr [[ALLOC]] |
| ; |
| %alloc = call ptr @malloc(i64 16) memory(none) |
| call void @llvm.memset.p0.i64(ptr %alloc, i8 0, i64 16, i1 false) |
| ret ptr %alloc |
| } |
| |
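| ; The store_same_* tests below write a just-loaded value back to its source |
| ; with an intervening store of the same value through a possibly-aliasing |
| ; pointer; the CHECK lines record the current, conservative output (the |
| ; write-back is kept). |
| |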
| define void @store_same_i32_to_mayalias_loc(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_i32_to_mayalias_loc( |
| ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 4 |
| ; CHECK-NEXT: store i32 [[V]], ptr [[Q:%.*]], align 4 |
| ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load i32, ptr %p, align 4 |
| store i32 %v, ptr %q, align 4 |
| store i32 %v, ptr %p, align 4 |
| ret void |
| } |
| |
| define void @store_same_i32_to_mayalias_loc_unalign(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_i32_to_mayalias_loc_unalign( |
| ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 1 |
| ; CHECK-NEXT: store i32 [[V]], ptr [[Q:%.*]], align 1 |
| ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 1 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load i32, ptr %p, align 1 |
| store i32 %v, ptr %q, align 1 |
| store i32 %v, ptr %p, align 1 |
| ret void |
| } |
| |
| define void @store_same_i12_to_mayalias_loc(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_i12_to_mayalias_loc( |
| ; CHECK-NEXT: [[V:%.*]] = load i12, ptr [[P:%.*]], align 2 |
| ; CHECK-NEXT: store i12 [[V]], ptr [[Q:%.*]], align 2 |
| ; CHECK-NEXT: store i12 [[V]], ptr [[P]], align 2 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load i12, ptr %p, align 2 |
| store i12 %v, ptr %q, align 2 |
| store i12 %v, ptr %p, align 2 |
| ret void |
| } |
| |
| define void @store_same_i12_to_mayalias_loc_unalign(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_i12_to_mayalias_loc_unalign( |
| ; CHECK-NEXT: [[V:%.*]] = load i12, ptr [[P:%.*]], align 1 |
| ; CHECK-NEXT: store i12 [[V]], ptr [[Q:%.*]], align 1 |
| ; CHECK-NEXT: store i12 [[V]], ptr [[P]], align 1 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load i12, ptr %p, align 1 |
| store i12 %v, ptr %q, align 1 |
| store i12 %v, ptr %p, align 1 |
| ret void |
| } |
| |
| define void @store_same_ptr_to_mayalias_loc(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_ptr_to_mayalias_loc( |
| ; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[P:%.*]], align 8 |
| ; CHECK-NEXT: store ptr [[V]], ptr [[Q:%.*]], align 8 |
| ; CHECK-NEXT: store ptr [[V]], ptr [[P]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load ptr, ptr %p, align 8 |
| store ptr %v, ptr %q, align 8 |
| store ptr %v, ptr %p, align 8 |
| ret void |
| } |
| |
| define void @store_same_scalable_to_mayalias_loc(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_scalable_to_mayalias_loc( |
| ; CHECK-NEXT: [[V:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 4 |
| ; CHECK-NEXT: store <vscale x 4 x i32> [[V]], ptr [[Q:%.*]], align 4 |
| ; CHECK-NEXT: store <vscale x 4 x i32> [[V]], ptr [[P]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load <vscale x 4 x i32>, ptr %p, align 4 |
| store <vscale x 4 x i32> %v, ptr %q, align 4 |
| store <vscale x 4 x i32> %v, ptr %p, align 4 |
| ret void |
| } |
| |
| define void @store_same_i32_to_mayalias_loc_inconsistent_align(ptr %q, ptr %p) { |
| ; CHECK-LABEL: @store_same_i32_to_mayalias_loc_inconsistent_align( |
| ; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P:%.*]], align 2 |
| ; CHECK-NEXT: store i32 [[V]], ptr [[Q:%.*]], align 4 |
| ; CHECK-NEXT: store i32 [[V]], ptr [[P]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %v = load i32, ptr %p, align 2 |
| store i32 %v, ptr %q, align 4 |
| store i32 %v, ptr %p, align 4 |
| ret void |
| } |
| |
| define void @do_not_crash_on_liveonentrydef(i1 %c, ptr %p, ptr noalias %q) { |
| ; CHECK-LABEL: @do_not_crash_on_liveonentrydef( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]] |
| ; CHECK: if: |
| ; CHECK-NEXT: store i8 0, ptr [[Q:%.*]], align 1 |
| ; CHECK-NEXT: br label [[JOIN]] |
| ; CHECK: join: |
| ; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[Q]], align 1 |
| ; CHECK-NEXT: store i8 0, ptr [[P:%.*]], align 1 |
| ; CHECK-NEXT: store i8 [[V]], ptr [[Q]], align 1 |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| br i1 %c, label %if, label %join |
| |
| if: |
| store i8 0, ptr %q, align 1 |
| br label %join |
| |
| join: |
| %v = load i8, ptr %q, align 1 |
| store i8 0, ptr %p, align 1 |
| store i8 %v, ptr %q, align 1 |
| ret void |
| } |