; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; MemCpy optimizations should take place even in the presence of invariant.start
; RUN: opt < %s -basic-aa -memcpyopt -S -enable-memcpyopt-memoryssa=0 | FileCheck %s --check-prefixes=CHECK,NO_MSSA
; RUN: opt < %s -basic-aa -memcpyopt -S -enable-memcpyopt-memoryssa=1 -verify-memoryssa | FileCheck %s --check-prefixes=CHECK,MSSA

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9"

%0 = type { x86_fp80, x86_fp80 }

declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
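
; llvm.invariant.start(<size>, <ptr>) marks the <size> bytes at <ptr> as unchanging from
; that point onward (until a matching llvm.invariant.end); it only reads the memory it
; points to, which is why the declaration above carries the readonly attribute.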
; FIXME: The invariant.start does not modify %P.
; The intermediate alloca and one of the memcpys should be eliminated, and the
; other should be transformed into a memmove.
define void @test1(i8* %P, i8* %Q) nounwind {
; NO_MSSA-LABEL: @test1(
; NO_MSSA-NEXT: [[MEMTMP:%.*]] = alloca [[TMP0:%.*]], align 16
; NO_MSSA-NEXT: [[R:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; NO_MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[R]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; NO_MSSA-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 32, i8* [[P]])
; NO_MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[R]], i32 32, i1 false)
; NO_MSSA-NEXT: ret void
;
; MSSA-LABEL: @test1(
; MSSA-NEXT: [[MEMTMP:%.*]] = alloca [[TMP0:%.*]], align 16
; MSSA-NEXT: [[R:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; MSSA-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[R]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; MSSA-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 32, i8* [[P]])
; MSSA-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P]], i32 32, i1 false)
; MSSA-NEXT: ret void
;
%memtmp = alloca %0, align 16
%R = bitcast %0* %memtmp to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
%i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %P)
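; invariant.start only reads %P, so the copy out of %memtmp below could be sourced
; directly from %P; the MSSA check lines above show it rewritten to a memmove from %P,
; while the NO_MSSA run still copies through %R.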
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
ret void
}

; The invariant.start intrinsic does not inhibit transforming the memcpy to a
; memset.
define void @test2(i8* %dst1, i8* %dst2, i8 %c) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[DST1:%.*]], i8 [[C:%.*]], i64 128, i1 false)
; CHECK-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 32, i8* [[DST1]])
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[DST2:%.*]], i8 [[C]], i64 128, i1 false)
; CHECK-NEXT: ret void
;
call void @llvm.memset.p0i8.i64(i8* %dst1, i8 %c, i64 128, i1 false)
%i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %dst1)
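; The memcpy below copies the bytes just written by the memset of %dst1; MemCpyOpt
; forwards this into a direct memset of %dst2 (see the CHECK lines above), and the
; intervening invariant.start does not block the transform.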
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst2, i8* align 8 %dst1, i64 128, i1 false)
ret void
}