; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -sroa -S | FileCheck %s
; RUN: opt < %s -passes=sroa -S | FileCheck %s
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
define i32 @test0() {
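; Promote simple scalar allocas guarded by lifetime intrinsics directly to SSA values.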
; CHECK-LABEL: @test0(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[V2_INT:%.*]] = bitcast float 0.000000e+00 to i32
; CHECK-NEXT: [[SUM1:%.*]] = add i32 0, [[V2_INT]]
; CHECK-NEXT: ret i32 [[SUM1]]
;
entry:
%a1 = alloca i32
%a2 = alloca float
%a1.i8 = bitcast i32* %a1 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* %a1.i8)
store i32 0, i32* %a1
%v1 = load i32, i32* %a1
call void @llvm.lifetime.end.p0i8(i64 4, i8* %a1.i8)
%a2.i8 = bitcast float* %a2 to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* %a2.i8)
store float 0.0, float* %a2
%v2 = load float , float * %a2
%v2.int = bitcast float %v2 to i32
%sum1 = add i32 %v1, %v2.int
call void @llvm.lifetime.end.p0i8(i64 4, i8* %a2.i8)
ret i32 %sum1
}
define i32 @test1() {
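; Promote a struct alloca accessed only through a GEP to its first field.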
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i32 0
;
entry:
%X = alloca { i32, float }
%Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
store i32 0, i32* %Y
%Z = load i32, i32* %Y
ret i32 %Z
}
define i64 @test2(i64 %X) {
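; Promote an [8 x i8] alloca accessed as a single i64 through a bitcast.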
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK: L2:
; CHECK-NEXT: ret i64 [[X:%.*]]
;
entry:
%A = alloca [8 x i8]
%B = bitcast [8 x i8]* %A to i64*
store i64 %X, i64* %B
br label %L2
L2:
%Z = load i64, i64* %B
ret i64 %Z
}
define i64 @test2_addrspacecast(i64 %X) {
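; Same as test2, but the whole-alloca access goes through an addrspacecast.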
; CHECK-LABEL: @test2_addrspacecast(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK: L2:
; CHECK-NEXT: ret i64 [[X:%.*]]
;
entry:
%A = alloca [8 x i8]
%B = addrspacecast [8 x i8]* %A to i64 addrspace(1)*
store i64 %X, i64 addrspace(1)* %B
br label %L2
L2:
%Z = load i64, i64 addrspace(1)* %B
ret i64 %Z
}
define i64 @test2_addrspacecast_gep(i64 %X, i16 %idx) {
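; The addrspacecast-based store and the bitcast-based load hit the same byte
; offset, so the slice can still be promoted.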
; CHECK-LABEL: @test2_addrspacecast_gep(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK: L2:
; CHECK-NEXT: ret i64 [[X:%.*]]
;
entry:
%A = alloca [256 x i8]
%B = addrspacecast [256 x i8]* %A to i64 addrspace(1)*
%gepA = getelementptr [256 x i8], [256 x i8]* %A, i16 0, i16 32
%gepB = getelementptr i64, i64 addrspace(1)* %B, i16 4
store i64 %X, i64 addrspace(1)* %gepB, align 1
br label %L2
L2:
%gepA.bc = bitcast i8* %gepA to i64*
%Z = load i64, i64* %gepA.bc, align 1
ret i64 %Z
}
; Avoid crashing when loading/storing at different offsets.
define i64 @test2_addrspacecast_gep_offset(i64 %X) {
; CHECK-LABEL: @test2_addrspacecast_gep_offset(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [10 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_2_GEPB_SROA_IDX:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[A_SROA_0]], i16 0, i16 2
; CHECK-NEXT: [[A_SROA_0_2_GEPB_SROA_CAST:%.*]] = addrspacecast i8* [[A_SROA_0_2_GEPB_SROA_IDX]] to i64 addrspace(1)*
; CHECK-NEXT: store i64 [[X:%.*]], i64 addrspace(1)* [[A_SROA_0_2_GEPB_SROA_CAST]], align 1
; CHECK-NEXT: br label [[L2:%.*]]
; CHECK: L2:
; CHECK-NEXT: [[A_SROA_0_0_GEPA_BC_SROA_CAST:%.*]] = bitcast [10 x i8]* [[A_SROA_0]] to i64*
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_30_Z:%.*]] = load i64, i64* [[A_SROA_0_0_GEPA_BC_SROA_CAST]], align 1
; CHECK-NEXT: ret i64 [[A_SROA_0_0_A_SROA_0_30_Z]]
;
entry:
%A = alloca [256 x i8]
%B = addrspacecast [256 x i8]* %A to i64 addrspace(1)*
%gepA = getelementptr [256 x i8], [256 x i8]* %A, i16 0, i16 30
%gepB = getelementptr i64, i64 addrspace(1)* %B, i16 4
store i64 %X, i64 addrspace(1)* %gepB, align 1
br label %L2
L2:
%gepA.bc = bitcast i8* %gepA to i64*
%Z = load i64, i64* %gepA.bc, align 1
ret i64 %Z
}
define void @test3(i8* %dst, i8* align 8 %src) {
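; Split a large alloca into separate slices around overlapping stores and
; whole-alloca memory transfers.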
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [42 x i8], align 1
; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca [99 x i8], align 1
; CHECK-NEXT: [[A_SROA_34:%.*]] = alloca [16 x i8], align 1
; CHECK-NEXT: [[A_SROA_15:%.*]] = alloca [42 x i8], align 1
; CHECK-NEXT: [[A_SROA_16:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_239:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_31:%.*]] = alloca [85 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_0_0_B_SROA_IDX]], i8* align 8 [[SRC:%.*]], i32 42, i1 false), !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[A_SROA_2_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 42
; CHECK-NEXT: [[A_SROA_2_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_2_0_SRC_SROA_RAW_IDX]], align 2, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 43
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX:%.*]] = getelementptr inbounds [99 x i8], [99 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_B_SROA_IDX]], i8* align 1 [[A_SROA_3_0_SRC_SROA_RAW_IDX]], i32 99, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 142
; CHECK-NEXT: [[A_SROA_34_0_B_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_0_B_SROA_IDX]], i8* align 2 [[A_SROA_34_0_SRC_SROA_RAW_IDX]], i32 16, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_15_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 158
; CHECK-NEXT: [[A_SROA_15_0_B_SROA_IDX:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_15]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_15_0_B_SROA_IDX]], i8* align 2 [[A_SROA_15_0_SRC_SROA_RAW_IDX]], i32 42, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_16_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 200
; CHECK-NEXT: [[A_SROA_16_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_16_0_B_SROA_IDX]], i8* align 8 [[A_SROA_16_0_SRC_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_23_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 207
; CHECK-NEXT: [[A_SROA_23_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_23_0_SRC_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_239_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 208
; CHECK-NEXT: [[A_SROA_239_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_0_B_SROA_IDX]], i8* align 8 [[A_SROA_239_0_SRC_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_31_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 215
; CHECK-NEXT: [[A_SROA_31_0_B_SROA_IDX:%.*]] = getelementptr inbounds [85 x i8], [85 x i8]* [[A_SROA_31]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_31_0_B_SROA_IDX]], i8* align 1 [[A_SROA_31_0_SRC_SROA_RAW_IDX]], i32 85, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I8_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 0
; CHECK-NEXT: store i8 1, i8* [[A_SROA_34_0_OVERLAP_1_I8_SROA_IDX]], align 1, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I16_SROA_CAST:%.*]] = bitcast [16 x i8]* [[A_SROA_34]] to i16*
; CHECK-NEXT: store i16 1, i16* [[A_SROA_34_0_OVERLAP_1_I16_SROA_CAST]], align 1, !tbaa [[TBAA5:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I32_SROA_CAST:%.*]] = bitcast [16 x i8]* [[A_SROA_34]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_34_0_OVERLAP_1_I32_SROA_CAST]], align 1, !tbaa [[TBAA7:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_0_OVERLAP_1_I64_SROA_CAST:%.*]] = bitcast [16 x i8]* [[A_SROA_34]] to i64*
; CHECK-NEXT: store i64 1, i64* [[A_SROA_34_0_OVERLAP_1_I64_SROA_CAST]], align 1, !tbaa [[TBAA9:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_1_OVERLAP_2_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_34_1_OVERLAP_2_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_1_OVERLAP_2_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 2, i64* [[A_SROA_34_1_OVERLAP_2_I64_SROA_CAST]], align 1, !tbaa [[TBAA11:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_2_OVERLAP_3_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 2
; CHECK-NEXT: [[A_SROA_34_2_OVERLAP_3_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_2_OVERLAP_3_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 3, i64* [[A_SROA_34_2_OVERLAP_3_I64_SROA_CAST]], align 1, !tbaa [[TBAA13:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_3_OVERLAP_4_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 3
; CHECK-NEXT: [[A_SROA_34_3_OVERLAP_4_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_3_OVERLAP_4_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 4, i64* [[A_SROA_34_3_OVERLAP_4_I64_SROA_CAST]], align 1, !tbaa [[TBAA15:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_4_OVERLAP_5_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 4
; CHECK-NEXT: [[A_SROA_34_4_OVERLAP_5_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_4_OVERLAP_5_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 5, i64* [[A_SROA_34_4_OVERLAP_5_I64_SROA_CAST]], align 1, !tbaa [[TBAA17:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_5_OVERLAP_6_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 5
; CHECK-NEXT: [[A_SROA_34_5_OVERLAP_6_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_5_OVERLAP_6_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 6, i64* [[A_SROA_34_5_OVERLAP_6_I64_SROA_CAST]], align 1, !tbaa [[TBAA19:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_6_OVERLAP_7_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 6
; CHECK-NEXT: [[A_SROA_34_6_OVERLAP_7_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_6_OVERLAP_7_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 7, i64* [[A_SROA_34_6_OVERLAP_7_I64_SROA_CAST]], align 1, !tbaa [[TBAA21:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_7_OVERLAP_8_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 7
; CHECK-NEXT: [[A_SROA_34_7_OVERLAP_8_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_7_OVERLAP_8_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 8, i64* [[A_SROA_34_7_OVERLAP_8_I64_SROA_CAST]], align 1, !tbaa [[TBAA23:![0-9]+]]
; CHECK-NEXT: [[A_SROA_34_8_OVERLAP_9_I64_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 8
; CHECK-NEXT: [[A_SROA_34_8_OVERLAP_9_I64_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_8_OVERLAP_9_I64_SROA_IDX]] to i64*
; CHECK-NEXT: store i64 9, i64* [[A_SROA_34_8_OVERLAP_9_I64_SROA_CAST]], align 1, !tbaa [[TBAA25:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_0_OVERLAP2_1_0_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: store i8 1, i8* [[A_SROA_16_0_OVERLAP2_1_0_I8_SROA_IDX]], align 1, !tbaa [[TBAA27:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_0_OVERLAP2_1_0_I16_SROA_CAST:%.*]] = bitcast [7 x i8]* [[A_SROA_16]] to i16*
; CHECK-NEXT: store i16 1, i16* [[A_SROA_16_0_OVERLAP2_1_0_I16_SROA_CAST]], align 1, !tbaa [[TBAA29:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_0_OVERLAP2_1_0_I32_SROA_CAST:%.*]] = bitcast [7 x i8]* [[A_SROA_16]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_16_0_OVERLAP2_1_0_I32_SROA_CAST]], align 1, !tbaa [[TBAA31:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 2, i32* [[A_SROA_16_1_OVERLAP2_1_1_I32_SROA_CAST]], align 1, !tbaa [[TBAA33:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 2
; CHECK-NEXT: [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 3, i32* [[A_SROA_16_2_OVERLAP2_1_2_I32_SROA_CAST]], align 1, !tbaa [[TBAA35:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 3
; CHECK-NEXT: [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 4, i32* [[A_SROA_16_3_OVERLAP2_1_3_I32_SROA_CAST]], align 1, !tbaa [[TBAA37:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_0_OVERLAP2_2_0_I32_SROA_CAST:%.*]] = bitcast [7 x i8]* [[A_SROA_239]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_239_0_OVERLAP2_2_0_I32_SROA_CAST]], align 1, !tbaa [[TBAA39:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: store i8 1, i8* [[A_SROA_239_1_OVERLAP2_2_1_I8_SROA_IDX]], align 1, !tbaa [[TBAA41:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 1, i16* [[A_SROA_239_1_OVERLAP2_2_1_I16_SROA_CAST]], align 1, !tbaa [[TBAA43:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 1, i32* [[A_SROA_239_1_OVERLAP2_2_1_I32_SROA_CAST]], align 1, !tbaa [[TBAA45:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 2
; CHECK-NEXT: [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 3, i32* [[A_SROA_239_2_OVERLAP2_2_2_I32_SROA_CAST]], align 1, !tbaa [[TBAA47:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 3
; CHECK-NEXT: [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 4, i32* [[A_SROA_239_3_OVERLAP2_2_3_I32_SROA_CAST]], align 1, !tbaa [[TBAA49:![0-9]+]]
; CHECK-NEXT: [[A_SROA_15_197_OVERLAP2_PREFIX_SROA_IDX:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_15]], i64 0, i64 39
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_15_197_OVERLAP2_PREFIX_SROA_IDX]], i8* align 1 [[SRC]], i32 3, i1 false), !tbaa [[TBAA51:![0-9]+]]
; CHECK-NEXT: [[A_SROA_16_197_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 3
; CHECK-NEXT: [[A_SROA_16_197_OVERLAP2_PREFIX_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_16_197_OVERLAP2_PREFIX_SROA_IDX]], i8* align 1 [[A_SROA_16_197_SRC_SROA_RAW_IDX]], i32 5, i1 false), !tbaa [[TBAA51]]
; CHECK-NEXT: [[A_SROA_16_2_OVERLAP2_1_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[A_SROA_16_2_OVERLAP2_1_2_I8_SROA_IDX]], i8 42, i32 5, i1 false), !tbaa [[TBAA53:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_0_OVERLAP2_1_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[A_SROA_239_0_OVERLAP2_1_2_I8_SROA_IDX]], i8 42, i32 2, i1 false), !tbaa [[TBAA53]]
; CHECK-NEXT: [[A_SROA_239_209_OVERLAP2_2_1_I8_SROA_IDX11:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 1
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_209_OVERLAP2_2_1_I8_SROA_IDX11]], i8* align 1 [[SRC]], i32 5, i1 false), !tbaa [[TBAA55:![0-9]+]]
; CHECK-NEXT: [[A_SROA_239_210_OVERLAP2_2_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 2
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_210_OVERLAP2_2_2_I8_SROA_IDX]], i8* align 1 [[SRC]], i32 5, i1 false), !tbaa [[TBAA57:![0-9]+]]
; CHECK-NEXT: [[A_SROA_31_210_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 5
; CHECK-NEXT: [[A_SROA_31_210_OVERLAP2_2_2_I8_SROA_IDX:%.*]] = getelementptr inbounds [85 x i8], [85 x i8]* [[A_SROA_31]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_31_210_OVERLAP2_2_2_I8_SROA_IDX]], i8* align 1 [[A_SROA_31_210_SRC_SROA_RAW_IDX]], i32 3, i1 false), !tbaa [[TBAA57]]
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX1:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[DST:%.*]], i8* align 1 [[A_SROA_0_0_B_SROA_IDX1]], i32 42, i1 false), !tbaa [[TBAA59:![0-9]+]]
; CHECK-NEXT: [[A_SROA_2_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 42
; CHECK-NEXT: store i8 0, i8* [[A_SROA_2_0_DST_SROA_RAW_IDX]], align 1, !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 43
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX3:%.*]] = getelementptr inbounds [99 x i8], [99 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_3_0_B_SROA_IDX3]], i32 99, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_34_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 142
; CHECK-NEXT: [[A_SROA_34_0_B_SROA_IDX5:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_34]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_34_0_B_SROA_IDX5]], i32 16, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_15_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 158
; CHECK-NEXT: [[A_SROA_15_0_B_SROA_IDX6:%.*]] = getelementptr inbounds [42 x i8], [42 x i8]* [[A_SROA_15]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_15_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_15_0_B_SROA_IDX6]], i32 42, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_16_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 200
; CHECK-NEXT: [[A_SROA_16_0_B_SROA_IDX7:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_16]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_16_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_16_0_B_SROA_IDX7]], i32 7, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_23_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 207
; CHECK-NEXT: store i8 42, i8* [[A_SROA_23_0_DST_SROA_RAW_IDX]], align 1, !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_239_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 208
; CHECK-NEXT: [[A_SROA_239_0_B_SROA_IDX10:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_239]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_239_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_239_0_B_SROA_IDX10]], i32 7, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: [[A_SROA_31_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 215
; CHECK-NEXT: [[A_SROA_31_0_B_SROA_IDX12:%.*]] = getelementptr inbounds [85 x i8], [85 x i8]* [[A_SROA_31]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_31_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_31_0_B_SROA_IDX12]], i32 85, i1 false), !tbaa [[TBAA59]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca [300 x i8]
%b = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* align 8 %src, i32 300, i1 false), !tbaa !0
; Clobber a single element of the array; this should be promotable and the store deleted.
%c = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 42
store i8 0, i8* %c
; Make a sequence of overlapping stores to the array. These overlap both in
; forward strides and in shrinking accesses.
%overlap.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 142
%overlap.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 143
%overlap.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 144
%overlap.4.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 145
%overlap.5.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 146
%overlap.6.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 147
%overlap.7.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 148
%overlap.8.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 149
%overlap.9.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 150
%overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
%overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
%overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
%overlap.2.i64 = bitcast i8* %overlap.2.i8 to i64*
%overlap.3.i64 = bitcast i8* %overlap.3.i8 to i64*
%overlap.4.i64 = bitcast i8* %overlap.4.i8 to i64*
%overlap.5.i64 = bitcast i8* %overlap.5.i8 to i64*
%overlap.6.i64 = bitcast i8* %overlap.6.i8 to i64*
%overlap.7.i64 = bitcast i8* %overlap.7.i8 to i64*
%overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
%overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
store i8 1, i8* %overlap.1.i8, !tbaa !3
store i16 1, i16* %overlap.1.i16, !tbaa !5
store i32 1, i32* %overlap.1.i32, !tbaa !7
store i64 1, i64* %overlap.1.i64, !tbaa !9
store i64 2, i64* %overlap.2.i64, !tbaa !11
store i64 3, i64* %overlap.3.i64, !tbaa !13
store i64 4, i64* %overlap.4.i64, !tbaa !15
store i64 5, i64* %overlap.5.i64, !tbaa !17
store i64 6, i64* %overlap.6.i64, !tbaa !19
store i64 7, i64* %overlap.7.i64, !tbaa !21
store i64 8, i64* %overlap.8.i64, !tbaa !23
store i64 9, i64* %overlap.9.i64, !tbaa !25
; Make two sequences of overlapping stores with more gaps and irregularities.
%overlap2.1.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 200
%overlap2.1.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 201
%overlap2.1.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 202
%overlap2.1.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 203
%overlap2.2.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 208
%overlap2.2.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 209
%overlap2.2.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 210
%overlap2.2.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 211
%overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
%overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
%overlap2.1.1.i32 = bitcast i8* %overlap2.1.1.i8 to i32*
%overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
%overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
store i8 1, i8* %overlap2.1.0.i8, !tbaa !27
store i16 1, i16* %overlap2.1.0.i16, !tbaa !29
store i32 1, i32* %overlap2.1.0.i32, !tbaa !31
store i32 2, i32* %overlap2.1.1.i32, !tbaa !33
store i32 3, i32* %overlap2.1.2.i32, !tbaa !35
store i32 4, i32* %overlap2.1.3.i32, !tbaa !37
%overlap2.2.0.i32 = bitcast i8* %overlap2.2.0.i8 to i32*
%overlap2.2.1.i16 = bitcast i8* %overlap2.2.1.i8 to i16*
%overlap2.2.1.i32 = bitcast i8* %overlap2.2.1.i8 to i32*
%overlap2.2.2.i32 = bitcast i8* %overlap2.2.2.i8 to i32*
%overlap2.2.3.i32 = bitcast i8* %overlap2.2.3.i8 to i32*
store i32 1, i32* %overlap2.2.0.i32, !tbaa !39
store i8 1, i8* %overlap2.2.1.i8, !tbaa !41
store i16 1, i16* %overlap2.2.1.i16, !tbaa !43
store i32 1, i32* %overlap2.2.1.i32, !tbaa !45
store i32 3, i32* %overlap2.2.2.i32, !tbaa !47
store i32 4, i32* %overlap2.2.3.i32, !tbaa !49
%overlap2.prefix = getelementptr i8, i8* %overlap2.1.1.i8, i64 -4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i1 false), !tbaa !51
; Bridge between the overlapping areas
call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i1 false), !tbaa !53
; ...promoted i8 store...
; Entirely within the second overlap.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i1 false), !tbaa !55
; Trailing past the second overlap.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i1 false), !tbaa !57
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i1 false), !tbaa !59
ret void
}
define void @test4(i8* %dst, i8* %src) {
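; Split an alloca whose slices are also copied between each other with memcpy
; and memmove.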
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [20 x i8], align 1
; CHECK-NEXT: [[A_SROA_2_SROA_4:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca [10 x i8], align 1
; CHECK-NEXT: [[A_SROA_34_SROA_5:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_6_SROA_4:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_7:%.*]] = alloca [40 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX:%.*]] = getelementptr inbounds [20 x i8], [20 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_0_0_B_SROA_IDX]], i8* align 1 [[SRC:%.*]], i32 20, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 20
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_COPYLOAD:%.*]] = load i16, i16* [[A_SROA_2_SROA_0_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_3_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 22
; CHECK-NEXT: [[A_SROA_2_SROA_3_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_2_SROA_3_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 23
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_2_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_2_SROA_4_0_B_SROA_IDX]], i8* align 1 [[A_SROA_2_SROA_4_0_A_SROA_2_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 30
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_B_SROA_IDX]], i8* align 1 [[A_SROA_3_0_SRC_SROA_RAW_IDX]], i32 10, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 40
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_COPYLOAD:%.*]] = load i16, i16* [[A_SROA_34_SROA_0_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_SROA_4_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 42
; CHECK-NEXT: [[A_SROA_34_SROA_4_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_34_SROA_4_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 43
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_SROA_5_0_B_SROA_IDX]], i8* align 1 [[A_SROA_34_SROA_5_0_A_SROA_34_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 50
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_COPYLOAD:%.*]] = load i16, i16* [[A_SROA_6_SROA_0_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_6_SROA_3_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 52
; CHECK-NEXT: [[A_SROA_6_SROA_3_0_COPYLOAD:%.*]] = load i8, i8* [[A_SROA_6_SROA_3_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 53
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_B_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_6_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_6_SROA_4_0_B_SROA_IDX]], i8* align 1 [[A_SROA_6_SROA_4_0_A_SROA_6_0_SRC_SROA_RAW_IDX_SROA_RAW_IDX]], i32 7, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_7_0_SRC_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 60
; CHECK-NEXT: [[A_SROA_7_0_B_SROA_IDX:%.*]] = getelementptr inbounds [40 x i8], [40 x i8]* [[A_SROA_7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_7_0_B_SROA_IDX]], i8* align 1 [[A_SROA_7_0_SRC_SROA_RAW_IDX]], i32 40, i1 false), !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_2_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: [[A_SROA_2_SROA_4_3_A_SRC_1_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_2_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_2_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX_SROA_IDX]], i8* align 1 [[A_SROA_2_SROA_4_3_A_SRC_1_SROA_IDX]], i32 7, i1 false), !tbaa [[TBAA3]]
; CHECK-NEXT: [[A_SROA_6_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX16_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: [[A_SROA_6_SROA_4_3_A_SRC_2_SROA_IDX:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_6_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_6_SROA_4_3_A_SROA_34_SROA_5_0_A_DST_1_SROA_IDX16_SROA_IDX]], i8* align 1 [[A_SROA_6_SROA_4_3_A_SRC_2_SROA_IDX]], i32 7, i1 false), !tbaa [[TBAA5]]
; CHECK-NEXT: [[A_SROA_0_0_B_SROA_IDX1:%.*]] = getelementptr inbounds [20 x i8], [20 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[DST:%.*]], i8* align 1 [[A_SROA_0_0_B_SROA_IDX1]], i32 20, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 20
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 [[A_SROA_2_SROA_0_0_COPYLOAD]], i16* [[A_SROA_2_SROA_0_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_2_SROA_3_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 22
; CHECK-NEXT: store i8 [[A_SROA_2_SROA_3_0_COPYLOAD]], i8* [[A_SROA_2_SROA_3_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 23
; CHECK-NEXT: [[A_SROA_2_SROA_4_0_B_SROA_IDX22:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_2_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_2_SROA_4_0_A_SROA_2_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], i8* align 1 [[A_SROA_2_SROA_4_0_B_SROA_IDX22]], i32 7, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 30
; CHECK-NEXT: [[A_SROA_3_0_B_SROA_IDX3:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[A_SROA_3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_3_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_3_0_B_SROA_IDX3]], i32 10, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 40
; CHECK-NEXT: [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 [[A_SROA_6_SROA_0_0_COPYLOAD]], i16* [[A_SROA_34_SROA_0_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_34_SROA_4_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 42
; CHECK-NEXT: store i8 [[A_SROA_6_SROA_3_0_COPYLOAD]], i8* [[A_SROA_34_SROA_4_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 43
; CHECK-NEXT: [[A_SROA_34_SROA_5_0_B_SROA_IDX15:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_34_SROA_5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_34_SROA_5_0_A_SROA_34_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], i8* align 1 [[A_SROA_34_SROA_5_0_B_SROA_IDX15]], i32 7, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 50
; CHECK-NEXT: [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_IDX]] to i16*
; CHECK-NEXT: store i16 [[A_SROA_6_SROA_0_0_COPYLOAD]], i16* [[A_SROA_6_SROA_0_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_CAST]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_6_SROA_3_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 52
; CHECK-NEXT: store i8 [[A_SROA_6_SROA_3_0_COPYLOAD]], i8* [[A_SROA_6_SROA_3_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], align 1, !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 53
; CHECK-NEXT: [[A_SROA_6_SROA_4_0_B_SROA_IDX19:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* [[A_SROA_6_SROA_4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_6_SROA_4_0_A_SROA_6_0_DST_SROA_RAW_IDX_SROA_RAW_IDX]], i8* align 1 [[A_SROA_6_SROA_4_0_B_SROA_IDX19]], i32 7, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: [[A_SROA_7_0_DST_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 60
; CHECK-NEXT: [[A_SROA_7_0_B_SROA_IDX8:%.*]] = getelementptr inbounds [40 x i8], [40 x i8]* [[A_SROA_7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_7_0_DST_SROA_RAW_IDX]], i8* align 1 [[A_SROA_7_0_B_SROA_IDX8]], i32 40, i1 false), !tbaa [[TBAA7]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca [100 x i8]
%b = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i1 false), !tbaa !0
%a.src.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 20
%a.dst.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 40
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i1 false), !tbaa !3
; Clobber a single element of the array; this should be promotable and the store deleted.
%c = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 42
store i8 0, i8* %c
%a.src.2 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 50
call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i1 false), !tbaa !5
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i1 false), !tbaa !7
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
define i16 @test5() {
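; Load an i16 from the upper half of a float-sized slice; this becomes a shift
; and truncate of the promoted value.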
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float 0.000000e+00 to i32
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_SHIFT:%.*]] = lshr i32 [[TMP0]], 16
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_2_EXTRACT_SHIFT]] to i16
; CHECK-NEXT: ret i16 [[A_SROA_0_2_EXTRACT_TRUNC]]
;
entry:
%a = alloca [4 x i8]
%fptr = bitcast [4 x i8]* %a to float*
store float 0.0, float* %fptr
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
%iptr = bitcast i8* %ptr to i16*
%val = load i16, i16* %iptr
ret i16 %val
}
define i16 @test5_multi_addrspace_access() {
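; Same as test5, but the float store goes through an addrspacecast.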
; CHECK-LABEL: @test5_multi_addrspace_access(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float 0.000000e+00 to i32
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_SHIFT:%.*]] = lshr i32 [[TMP0]], 16
; CHECK-NEXT: [[A_SROA_0_2_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_2_EXTRACT_SHIFT]] to i16
; CHECK-NEXT: ret i16 [[A_SROA_0_2_EXTRACT_TRUNC]]
;
entry:
%a = alloca [4 x i8]
%fptr = bitcast [4 x i8]* %a to float*
%fptr.as1 = addrspacecast float* %fptr to float addrspace(1)*
store float 0.0, float addrspace(1)* %fptr.as1
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
%iptr = bitcast i8* %ptr to i16*
%val = load i16, i16* %iptr
ret i16 %val
}
define i32 @test6() {
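; Rewrite a volatile memset covering the whole alloca as a volatile integer store.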
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: store volatile i32 707406378, i32* [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_VAL:%.*]] = load i32, i32* [[A_SROA_0]], align 4
; CHECK-NEXT: ret i32 [[A_SROA_0_0_A_SROA_0_0_VAL]]
;
entry:
%a = alloca [4 x i8]
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i1 true)
%iptr = bitcast i8* %ptr to i32*
%val = load i32, i32* %iptr
ret i32 %val
}
define void @test7(i8* %src, i8* %dst) {
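; Rewrite whole-alloca volatile memcpys as volatile integer loads and stores.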
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_SROA_0_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i32*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, i32* [[A_SROA_0_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_COPYLOAD]], i32* [[A_SROA_0]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_0_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i32*
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_COPYLOAD1:%.*]] = load volatile i32, i32* [[A_SROA_0]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD1]], i32* [[A_SROA_0_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA3]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca [4 x i8]
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 true), !tbaa !0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 true), !tbaa !3
ret void
}
%S1 = type { i32, i32, [16 x i8] }
%S2 = type { %S1*, %S2* }
define %S2 @test8(%S2* %arg) {
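; Promote a struct alloca that is built up field by field and returned by value.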
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S2_NEXT_PTR:%.*]] = getelementptr [[S2:%.*]], %S2* [[ARG:%.*]], i64 0, i32 1
; CHECK-NEXT: [[S2_NEXT:%.*]] = load %S2*, %S2** [[S2_NEXT_PTR]], align 8, !tbaa [[TBAA0]]
; CHECK-NEXT: [[S2_NEXT_S1_PTR:%.*]] = getelementptr [[S2]], %S2* [[S2_NEXT]], i64 0, i32 0
; CHECK-NEXT: [[S2_NEXT_S1:%.*]] = load %S1*, %S1** [[S2_NEXT_S1_PTR]], align 8, !tbaa [[TBAA3]]
; CHECK-NEXT: [[S2_NEXT_NEXT_PTR:%.*]] = getelementptr [[S2]], %S2* [[S2_NEXT]], i64 0, i32 1
; CHECK-NEXT: [[S2_NEXT_NEXT:%.*]] = load %S2*, %S2** [[S2_NEXT_NEXT_PTR]], align 8, !tbaa [[TBAA7]]
; CHECK-NEXT: [[RESULT1:%.*]] = insertvalue [[S2]] undef, %S1* [[S2_NEXT_S1]], 0
; CHECK-NEXT: [[RESULT2:%.*]] = insertvalue [[S2]] [[RESULT1]], %S2* [[S2_NEXT_NEXT]], 1
; CHECK-NEXT: ret [[S2]] [[RESULT2]]
;
entry:
%new = alloca %S2
%s2.next.ptr = getelementptr %S2, %S2* %arg, i64 0, i32 1
%s2.next = load %S2*, %S2** %s2.next.ptr, !tbaa !0
%s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
%s2.next.s1 = load %S1*, %S1** %s2.next.s1.ptr, !tbaa !3
%new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
store %S1* %s2.next.s1, %S1** %new.s1.ptr, !tbaa !5
%s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
%s2.next.next = load %S2*, %S2** %s2.next.next.ptr, !tbaa !7
%new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
store %S2* %s2.next.next, %S2** %new.next.ptr, !tbaa !9
%new.s1 = load %S1*, %S1** %new.s1.ptr
%result1 = insertvalue %S2 undef, %S1* %new.s1, 0
%new.next = load %S2*, %S2** %new.next.ptr
%result2 = insertvalue %S2 %result1, %S2* %new.next, 1
ret %S2 %result2
}
define i64 @test9() {
; Ensure we can handle loads off the end of an alloca even when wrapped in
; weird bit casts and types. This is valid IR due to the alignment and masking
; off the bits past the end of the alloca.
;
; CHECK-LABEL: @test9(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i8 26 to i64
; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i64 [[A_SROA_3_0_INSERT_EXT]], 16
; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i64 undef, -16711681
; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 0 to i64
; CHECK-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i64 [[A_SROA_2_0_INSERT_EXT]], 8
; CHECK-NEXT: [[A_SROA_2_0_INSERT_MASK:%.*]] = and i64 [[A_SROA_3_0_INSERT_INSERT]], -65281
; CHECK-NEXT: [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i8 0 to i64
; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[A_SROA_2_0_INSERT_INSERT]], -256
; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: [[RESULT:%.*]] = and i64 [[A_SROA_0_0_INSERT_INSERT]], 16777215
; CHECK-NEXT: ret i64 [[RESULT]]
;
entry:
%a = alloca { [3 x i8] }, align 8
%gep1 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 0
store i8 0, i8* %gep1, align 1
%gep2 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 1
store i8 0, i8* %gep2, align 1
%gep3 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 2
store i8 26, i8* %gep3, align 1
%cast = bitcast { [3 x i8] }* %a to { i64 }*
%elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
%load = load i64, i64* %elt
%result = and i64 %load, 16777215
ret i64 %result
}
define %S2* @test10() {
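; A pointer loaded from a zero-filled slice folds to null.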
; CHECK-LABEL: @test10(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint %S2* null to i64
; CHECK-NEXT: ret %S2* null
;
entry:
%a = alloca [8 x i8]
%ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i1 false)
%s2ptrptr = bitcast i8* %ptr to %S2**
%s2ptr = load %S2*, %S2** %s2ptrptr
ret %S2* %s2ptr
}
define i32 @test11() {
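; Accesses past the end of the alloca yield undef; the in-bounds path is still promoted.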
; CHECK-LABEL: @test11(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 undef, label [[GOOD:%.*]], label [[BAD:%.*]]
; CHECK: good:
; CHECK-NEXT: ret i32 0
; CHECK: bad:
; CHECK-NEXT: ret i32 undef
;
entry:
%X = alloca i32
br i1 undef, label %good, label %bad
good:
%Y = getelementptr i32, i32* %X, i64 0
store i32 0, i32* %Y
%Z = load i32, i32* %Y
ret i32 %Z
bad:
%Y2 = getelementptr i32, i32* %X, i64 1
store i32 0, i32* %Y2
%Z2 = load i32, i32* %Y2
ret i32 %Z2
}
define i8 @test12() {
; We fully promote these to the i24 load or store size, resulting in just masks
; and other operations that instcombine will fold, but no alloca.
;
; CHECK-LABEL: @test12(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i24 [[A_SROA_3_0_INSERT_EXT]], 16
; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i24 undef, 65535
; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i24 [[A_SROA_2_0_INSERT_EXT]], 8
; CHECK-NEXT: [[A_SROA_2_0_INSERT_MASK:%.*]] = and i24 [[A_SROA_3_0_INSERT_INSERT]], -65281
; CHECK-NEXT: [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i24 [[A_SROA_2_0_INSERT_INSERT]], -256
; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: [[B_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[A_SROA_0_0_INSERT_INSERT]] to i8
; CHECK-NEXT: [[B_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i24 [[A_SROA_0_0_INSERT_INSERT]], 8
; CHECK-NEXT: [[B_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[B_SROA_2_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT: [[B_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i24 [[A_SROA_0_0_INSERT_INSERT]], 16
; CHECK-NEXT: [[B_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[B_SROA_3_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT: [[BSUM0:%.*]] = add i8 [[B_SROA_0_0_EXTRACT_TRUNC]], [[B_SROA_2_0_EXTRACT_TRUNC]]
; CHECK-NEXT: [[BSUM1:%.*]] = add i8 [[BSUM0]], [[B_SROA_3_0_EXTRACT_TRUNC]]
; CHECK-NEXT: ret i8 [[BSUM1]]
;
entry:
%a = alloca [3 x i8]
%b = alloca [3 x i8]
%a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %a0ptr
%a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %a1ptr
%a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
%aiptr = bitcast [3 x i8]* %a to i24*
%ai = load i24, i24* %aiptr
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
%b0 = load i8, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
%b1 = load i8, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
%b2 = load i8, i8* %b2ptr
%bsum0 = add i8 %b0, %b1
%bsum1 = add i8 %bsum0, %b2
ret i8 %bsum1
}
define i32 @test13() {
; Ensure we don't crash on, and correctly handle, undefined loads that straddle
; the end of the allocation.
; CHECK-LABEL: @test13(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_2_2_LOAD_EXT:%.*]] = zext i8 0 to i16
; CHECK-NEXT: [[RET:%.*]] = zext i16 [[A_SROA_2_2_LOAD_EXT]] to i32
; CHECK-NEXT: ret i32 [[RET]]
;
entry:
%a = alloca [3 x i8], align 2
%b0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %b2ptr
%iptrcast = bitcast [3 x i8]* %a to i16*
%iptrgep = getelementptr i16, i16* %iptrcast, i64 1
%i = load i16, i16* %iptrgep
%ret = zext i16 %i to i32
ret i32 %ret
}
%test14.struct = type { [3 x i32] }
define void @test14(...) nounwind uwtable {
; This is a strange case where we split allocas into promotable partitions, but
; also gain enough data to prove they must be dead allocas due to GEPs that walk
; across two adjacent allocas. Test that we don't try to promote or otherwise
; do bad things to these dead allocas; they should just be removed.
; CHECK-LABEL: @test14(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret void
;
entry:
%a = alloca %test14.struct
%p = alloca %test14.struct*
%0 = bitcast %test14.struct* %a to i8*
%1 = getelementptr i8, i8* %0, i64 12
%2 = bitcast i8* %1 to %test14.struct*
%3 = getelementptr inbounds %test14.struct, %test14.struct* %2, i32 0, i32 0
%4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
%5 = bitcast [3 x i32]* %3 to i32*
%6 = bitcast [3 x i32]* %4 to i32*
%7 = load i32, i32* %6, align 4
store i32 %7, i32* %5, align 4
%8 = getelementptr inbounds i32, i32* %5, i32 1
%9 = getelementptr inbounds i32, i32* %6, i32 1
%10 = load i32, i32* %9, align 4
store i32 %10, i32* %8, align 4
%11 = getelementptr inbounds i32, i32* %5, i32 2
%12 = getelementptr inbounds i32, i32* %6, i32 2
%13 = load i32, i32* %12, align 4
store i32 %13, i32* %11, align 4
ret void
}
define i32 @test15(i1 %flag) nounwind uwtable {
; Ensure that when there are dead instructions using an alloca that are not
; loads or stores, we still delete them during partitioning and rewriting.
; Otherwise we'll go to promote them while they still have unpromotable uses.
; CHECK-LABEL: @test15(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: br label [[LOOP]]
;
entry:
%l0 = alloca i64
%l1 = alloca i64
%l2 = alloca i64
%l3 = alloca i64
br label %loop
loop:
%dead3 = phi i8* [ %gep3, %loop ], [ null, %entry ]
store i64 1879048192, i64* %l0, align 8
%bc0 = bitcast i64* %l0 to i8*
%gep0 = getelementptr i8, i8* %bc0, i64 3
%dead0 = bitcast i8* %gep0 to i64*
store i64 1879048192, i64* %l1, align 8
%bc1 = bitcast i64* %l1 to i8*
%gep1 = getelementptr i8, i8* %bc1, i64 3
%dead1 = getelementptr i8, i8* %gep1, i64 1
store i64 1879048192, i64* %l2, align 8
%bc2 = bitcast i64* %l2 to i8*
%gep2.1 = getelementptr i8, i8* %bc2, i64 1
%gep2.2 = getelementptr i8, i8* %bc2, i64 3
; Note that this select should get visited multiple times due to using two
; different GEPs off the same alloca. We should only delete it once.
%dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
store i64 1879048192, i64* %l3, align 8
%bc3 = bitcast i64* %l3 to i8*
%gep3 = getelementptr i8, i8* %bc3, i64 3
br label %loop
}
define void @test16(i8* %src, i8* %dst) {
; Ensure that we can promote an alloca of [3 x i8] to an i24 SSA value.
; CHECK-LABEL: @test16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i24*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i24, i24* [[A_SROA_0_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_0_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i24*
; CHECK-NEXT: store i24 0, i24* [[A_SROA_0_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA5]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca [3 x i8]
%ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 false), !tbaa !0
%cast = bitcast i8* %ptr to i24*
store i24 0, i24* %cast, !tbaa !3
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 false), !tbaa !5
ret void
}
define void @test17(i8* %src, i8* %dst) {
; Ensure that we can rewrite unpromotable memcpys which extend past the end of
; the alloca.
; CHECK-LABEL: @test17(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: [[PTR:%.*]] = getelementptr [3 x i8], [3 x i8]* [[A]], i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[PTR]], i8* [[SRC:%.*]], i32 4, i1 true), !tbaa [[TBAA0]]
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DST:%.*]], i8* [[PTR]], i32 4, i1 true), !tbaa [[TBAA3]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca [3 x i8]
%ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i1 true), !tbaa !0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i1 true), !tbaa !3
ret void
}
define void @test18(i8* %src, i8* %dst, i32 %size) {
; Preserve transfer intrinsics with a variable size, even if they overlap with
; fixed-size operations. Further, continue to split and promote allocas preceding
; the variable-sized intrinsic.
; CHECK-LABEL: @test18(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_33:%.*]] = alloca [34 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i32*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i32, i32* [[A_SROA_0_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 4
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_3_0_SRC_SROA_IDX]] to i32*
; CHECK-NEXT: [[A_SROA_3_0_COPYLOAD:%.*]] = load i32, i32* [[A_SROA_3_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_33_0_PTR2_SROA_IDX:%.*]] = getelementptr inbounds [34 x i8], [34 x i8]* [[A_SROA_33]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[A_SROA_33_0_PTR2_SROA_IDX]], i8* [[SRC]], i32 [[SIZE:%.*]], i1 false), !tbaa [[TBAA3]]
; CHECK-NEXT: [[A_SROA_33_0_PTR2_SROA_IDX6:%.*]] = getelementptr inbounds [34 x i8], [34 x i8]* [[A_SROA_33]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[A_SROA_33_0_PTR2_SROA_IDX6]], i8 42, i32 [[SIZE]], i1 false), !tbaa [[TBAA5]]
; CHECK-NEXT: [[A_SROA_0_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i32*
; CHECK-NEXT: store i32 42, i32* [[A_SROA_0_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA9]]
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[DST]], i64 4
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_3_0_DST_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 [[A_SROA_3_0_COPYLOAD]], i32* [[A_SROA_3_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA9]]
; CHECK-NEXT: [[A_SROA_33_0_PTR2_SROA_IDX7:%.*]] = getelementptr inbounds [34 x i8], [34 x i8]* [[A_SROA_33]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DST]], i8* align 1 [[A_SROA_33_0_PTR2_SROA_IDX7]], i32 [[SIZE]], i1 false), !tbaa [[TBAA11]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca [42 x i8]
%ptr = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i1 false), !tbaa !0
%ptr2 = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 8
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i1 false), !tbaa !3
call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i1 false), !tbaa !5
%cast = bitcast i8* %ptr to i32*
store i32 42, i32* %cast, !tbaa !7
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i1 false), !tbaa !9
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr2, i32 %size, i1 false), !tbaa !11
ret void
}
%opaque = type opaque
define i32 @test19(%opaque* %x) {
; This input will cause us to try to compute a natural GEP when rewriting
; pointers in such a way that we try to GEP through the opaque type. Previously,
; a check for an unsized type was missing and this crashed. Ensure it behaves
; reasonably now.
; CHECK-LABEL: @test19(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CAST1:%.*]] = bitcast %opaque* [[X:%.*]] to i8*
; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_CAST:%.*]] = bitcast i8* [[CAST1]] to i64*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i64, i64* [[A_SROA_0_0_CAST1_SROA_CAST]], align 1
; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds i8, i8* [[CAST1]], i64 8
; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_CAST:%.*]] = bitcast i8* [[A_SROA_2_0_CAST1_SROA_IDX]] to i8**
; CHECK-NEXT: [[A_SROA_2_0_COPYLOAD:%.*]] = load i8*, i8** [[A_SROA_2_0_CAST1_SROA_CAST]], align 1
; CHECK-NEXT: ret i32 undef
;
entry:
%a = alloca { i64, i8* }
%cast1 = bitcast %opaque* %x to i8*
%cast2 = bitcast { i64, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i1 false)
%gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
%val = load i64, i64* %gep
ret i32 undef
}
declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i32, i1) nounwind
define i32 @test19_addrspacecast(%opaque* %x) {
; This input will cause us to try to compute a natural GEP when rewriting
; pointers in such a way that we try to GEP through the opaque type. Previously,
; a check for an unsized type was missing and this crashed. Ensure it behaves
; reasonably now.
; CHECK-LABEL: @test19_addrspacecast(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CAST1:%.*]] = addrspacecast %opaque* [[X:%.*]] to i8 addrspace(1)*
; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_CAST:%.*]] = bitcast i8 addrspace(1)* [[CAST1]] to i64 addrspace(1)*
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load i64, i64 addrspace(1)* [[A_SROA_0_0_CAST1_SROA_CAST]], align 1
; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[CAST1]], i16 8
; CHECK-NEXT: [[A_SROA_2_0_CAST1_SROA_CAST:%.*]] = bitcast i8 addrspace(1)* [[A_SROA_2_0_CAST1_SROA_IDX]] to i8* addrspace(1)*
; CHECK-NEXT: [[A_SROA_2_0_COPYLOAD:%.*]] = load i8*, i8* addrspace(1)* [[A_SROA_2_0_CAST1_SROA_CAST]], align 1
; CHECK-NEXT: ret i32 undef
;
entry:
%a = alloca { i64, i8* }
%cast1 = addrspacecast %opaque* %x to i8 addrspace(1)*
%cast2 = bitcast { i64, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p1i8.i32(i8* %cast2, i8 addrspace(1)* %cast1, i32 16, i32 1, i1 false)
%gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
%val = load i64, i64* %gep
ret i32 undef
}
define i32 @test20() {
; Ensure we can track negative offsets (before the beginning of the alloca) and
; negative relative offsets from offsets starting past the end of the alloca.
; CHECK-LABEL: @test20(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUM1:%.*]] = add i32 1, 2
; CHECK-NEXT: [[SUM2:%.*]] = add i32 [[SUM1]], 3
; CHECK-NEXT: ret i32 [[SUM2]]
;
entry:
%a = alloca [3 x i32]
%gep1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 0
store i32 1, i32* %gep1
%gep2.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 -2
%gep2.2 = getelementptr i32, i32* %gep2.1, i32 3
store i32 2, i32* %gep2.2
%gep3.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 14
%gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
store i32 3, i32* %gep3.2
%load1 = load i32, i32* %gep1
%load2 = load i32, i32* %gep2.2
%load3 = load i32, i32* %gep3.2
%sum1 = add i32 %load1, %load2
%sum2 = add i32 %sum1, %load3
ret i32 %sum2
}
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
define i8 @test21() {
; Test allocations and offsets that border on overflow of the int64_t used
; internally. This is awkward to test thoroughly, as LLVM doesn't cleanly
; support such extreme constructs.
; CHECK-LABEL: @test21(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RESULT:%.*]] = or i8 -1, -1
; CHECK-NEXT: ret i8 [[RESULT]]
;
entry:
%a = alloca [2305843009213693951 x i8]
%gep0 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
store i8 255, i8* %gep0
%gep1 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
%gep2 = getelementptr i8, i8* %gep1, i64 -1
call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i1 false)
%gep3 = getelementptr i8, i8* %gep1, i64 9223372036854775807
%gep4 = getelementptr i8, i8* %gep3, i64 9223372036854775807
%gep5 = getelementptr i8, i8* %gep4, i64 -6917529027641081857
store i8 255, i8* %gep5
%cast1 = bitcast i8* %gep4 to i32*
store i32 0, i32* %cast1
%load = load i8, i8* %gep0
%gep6 = getelementptr i8, i8* %gep0, i32 1
%load2 = load i8, i8* %gep6
%result = or i8 %load, %load2
ret i8 %result
}
%PR13916.struct = type { i8 }
define void @PR13916.1() {
; Ensure that we handle overlapping memcpy intrinsics correctly, especially in
; the case where there is a directly identical value for both source and dest.
; CHECK-LABEL: @PR13916.1(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret void
;
entry:
%a = alloca i8
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 1, i1 false)
%tmp2 = load i8, i8* %a
ret void
}
define void @PR13916.2() {
; Check whether we continue to handle them correctly when they start off with
; different pointer value chains, but during rewriting we coalesce them into the
; same value.
; CHECK-LABEL: @PR13916.2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 undef, label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret void
;
entry:
%a = alloca %PR13916.struct, align 1
br i1 undef, label %if.then, label %if.end
if.then:
%tmp0 = bitcast %PR13916.struct* %a to i8*
%tmp1 = bitcast %PR13916.struct* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp0, i8* %tmp1, i32 1, i1 false)
br label %if.end
if.end:
%gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
%tmp2 = load i8, i8* %gep
ret void
}
define void @PR13990() {
; Ensure we can handle cases where processing one alloca causes the other
; alloca to become dead and get deleted. This might crash or fail under
; Valgrind if we regress.
; CHECK-LABEL: @PR13990(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br i1 undef, label [[BB2]], label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br i1 undef, label [[BB3]], label [[BB4:%.*]]
; CHECK: bb3:
; CHECK-NEXT: unreachable
; CHECK: bb4:
; CHECK-NEXT: unreachable
;
entry:
%tmp1 = alloca i8*
%tmp2 = alloca i8*
br i1 undef, label %bb1, label %bb2
bb1:
store i8* undef, i8** %tmp2
br i1 undef, label %bb2, label %bb3
bb2:
%tmp50 = select i1 undef, i8** %tmp2, i8** %tmp1
br i1 undef, label %bb3, label %bb4
bb3:
unreachable
bb4:
unreachable
}
define double @PR13969(double %x) {
; Check that we detect when promotion will un-escape an alloca and iterate to
; re-try running SROA over that alloca. Without that, the two allocas that are
; stored into a dead alloca don't get rewritten and promoted.
; CHECK-LABEL: @PR13969(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret double [[X:%.*]]
;
entry:
%a = alloca double
%b = alloca double*
%c = alloca double
store double %x, double* %a
store double* %c, double** %b
store double* %a, double** %b
store double %x, double* %c
%ret = load double, double* %a
ret double %ret
}
%PR14034.struct = type { { {} }, i32, %PR14034.list }
%PR14034.list = type { %PR14034.list*, %PR14034.list* }
define void @PR14034() {
; This test case forms GEPs into the empty leading struct members and crashed
; (under Valgrind) before the PR was fixed. The important thing is to handle
; empty structs gracefully.
; CHECK-LABEL: @PR14034(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [12 x i8], align 8
; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds [12 x i8], [12 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 undef, i8* align 8 [[A_SROA_0_0_CAST1_SROA_IDX]], i32 12, i1 false)
; CHECK-NEXT: ret void
;
entry:
%a = alloca %PR14034.struct
%list = getelementptr %PR14034.struct, %PR14034.struct* %a, i32 0, i32 2
%prev = getelementptr %PR14034.list, %PR14034.list* %list, i32 0, i32 1
store %PR14034.list* undef, %PR14034.list** %prev
%cast0 = bitcast %PR14034.struct* undef to i8*
%cast1 = bitcast %PR14034.struct* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast0, i8* %cast1, i32 12, i1 false)
ret void
}
define i32 @test22(i32 %x) {
; Test that SROA and promotion are not confused by a grab bag mixture of pointer
; types involving wrapper aggregates and zero-length aggregate members.
; CHECK-LABEL: @test22(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WRAP1:%.*]] = insertvalue [1 x { i32 }] undef, i32 [[X:%.*]], 0, 0
; CHECK-NEXT: [[WRAP1_FCA_0_0_EXTRACT:%.*]] = extractvalue [1 x { i32 }] [[WRAP1]], 0, 0
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[WRAP1_FCA_0_0_EXTRACT]] to float
; CHECK-NEXT: [[LOAD1_FCA_0_0_0_INSERT:%.*]] = insertvalue { [1 x { float }] } undef, float [[TMP0]], 0, 0, 0
; CHECK-NEXT: [[UNWRAP1:%.*]] = extractvalue { [1 x { float }] } [[LOAD1_FCA_0_0_0_INSERT]], 0, 0
; CHECK-NEXT: [[WRAP2:%.*]] = insertvalue { {}, { float }, [0 x i8] } undef, { float } [[UNWRAP1]], 1
; CHECK-NEXT: [[WRAP2_FCA_1_0_EXTRACT:%.*]] = extractvalue { {}, { float }, [0 x i8] } [[WRAP2]], 1, 0
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[WRAP2_FCA_1_0_EXTRACT]] to <4 x i8>
; CHECK-NEXT: [[VALCAST1:%.*]] = bitcast <4 x i8> [[TMP1]] to i32
; CHECK-NEXT: [[WRAP3:%.*]] = insertvalue [1 x [1 x i32]] undef, i32 [[VALCAST1]], 0, 0
; CHECK-NEXT: [[WRAP4:%.*]] = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] [[WRAP3]], 0
; CHECK-NEXT: [[WRAP4_FCA_0_0_0_EXTRACT:%.*]] = extractvalue { [1 x [1 x i32]], {} } [[WRAP4]], 0, 0, 0
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[WRAP4_FCA_0_0_0_EXTRACT]] to <4 x i8>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i8> [[TMP2]] to float
; CHECK-NEXT: [[LOAD4_FCA_1_INSERT:%.*]] = insertvalue { {}, float, {} } undef, float [[TMP3]], 1
; CHECK-NEXT: [[UNWRAP2:%.*]] = extractvalue { {}, float, {} } [[LOAD4_FCA_1_INSERT]], 1
; CHECK-NEXT: [[VALCAST2:%.*]] = bitcast float [[UNWRAP2]] to i32
; CHECK-NEXT: ret i32 [[VALCAST2]]
;
entry:
%a1 = alloca { { [1 x { i32 }] } }
%a2 = alloca { {}, { float }, [0 x i8] }
%a3 = alloca { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }
%wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
%gep1 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
%gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
%ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
%load1 = load { [1 x { float }] }, { [1 x { float }] }* %ptrcast1
%unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
%wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
%gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
%ptrcast2 = bitcast float* %gep3 to <4 x i8>*
%load3 = load <4 x i8>, <4 x i8>* %ptrcast2
%valcast1 = bitcast <4 x i8> %load3 to i32
%wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
%wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
%gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
%ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
%gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
%ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
%load4 = load { {}, float, {} }, { {}, float, {} }* %ptrcast4
%unwrap2 = extractvalue { {}, float, {} } %load4, 1
%valcast2 = bitcast float %unwrap2 to i32
ret i32 %valcast2
}
define void @PR14059.1(double* %d) {
; In PR14059 a peculiar construct was identified as something that is used
; pervasively in ARM's ABI-calling-convention lowering: the passing of a struct
; of doubles via an array of i32 in order to place the data into integer
; registers. This in turn was missed as an optimization by SROA due to the
; partial loads and stores of integers to the double alloca we were trying to
; form and promote. The solution is to widen the integer operations to be
; whole-alloca operations, and perform the appropriate bitcasting on the
; *values* rather than the pointers. When this works, partial reads and writes
; via integers can be promoted away.
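;
; As a rough, hypothetical C analogue (not the exact source of this test, and
; assuming a little-endian target with 32-bit int), code along these lines
; produces this style of partial integer writes into a double-sized slot:
;
;   #include <string.h>
;   void pr14059_1(double *d) {
;     union { double dbl; unsigned w[2]; unsigned char b[8]; } x;
;     x.w[0] = 0;                 /* store to the low 32 bits     */
;     memset(x.b + 2, 0, 4);      /* memset of the middle 32 bits */
;     memset(x.b, 0, 8);          /* memset of the whole value    */
;     memcpy(x.b + 4, d, 4);      /* memcpy into the high 32 bits */
;     x.w[1] = 0x3ff00000;        /* high word of the double 1.0  */
;     *d += x.dbl;                /* do the actual math           */
;   }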
; CHECK-LABEL: @PR14059.1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast double undef to i64
; CHECK-NEXT: [[X_SROA_0_I_0_INSERT_MASK:%.*]] = and i64 [[TMP0]], -4294967296
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[X_SROA_0_I_0_INSERT_MASK]] to double
; CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP1]] to i64
; CHECK-NEXT: [[X_SROA_0_I_2_INSERT_MASK:%.*]] = and i64 [[TMP2]], -281474976645121
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[X_SROA_0_I_2_INSERT_MASK]] to double
; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
; CHECK-NEXT: [[X_SROA_0_I_4_D_RAW_SROA_CAST:%.*]] = bitcast double* [[D:%.*]] to i32*
; CHECK-NEXT: [[X_SROA_0_I_4_COPYLOAD:%.*]] = load i32, i32* [[X_SROA_0_I_4_D_RAW_SROA_CAST]], align 1
; CHECK-NEXT: [[TMP5:%.*]] = bitcast double 0.000000e+00 to i64
; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_EXT:%.*]] = zext i32 [[X_SROA_0_I_4_COPYLOAD]] to i64
; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_SHIFT:%.*]] = shl i64 [[X_SROA_0_I_4_INSERT_EXT]], 32
; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_MASK4:%.*]] = and i64 [[TMP5]], 4294967295
; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_INSERT5:%.*]] = or i64 [[X_SROA_0_I_4_INSERT_MASK4]], [[X_SROA_0_I_4_INSERT_SHIFT]]
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[X_SROA_0_I_4_INSERT_INSERT5]] to double
; CHECK-NEXT: [[TMP7:%.*]] = bitcast double [[TMP6]] to i64
; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_MASK:%.*]] = and i64 [[TMP7]], 4294967295
; CHECK-NEXT: [[X_SROA_0_I_4_INSERT_INSERT:%.*]] = or i64 [[X_SROA_0_I_4_INSERT_MASK]], 4607182418800017408
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i64 [[X_SROA_0_I_4_INSERT_INSERT]] to double
; CHECK-NEXT: [[ACCUM_REAL_I:%.*]] = load double, double* [[D]], align 8
; CHECK-NEXT: [[ADD_R_I:%.*]] = fadd double [[ACCUM_REAL_I]], [[TMP8]]
; CHECK-NEXT: store double [[ADD_R_I]], double* [[D]], align 8
; CHECK-NEXT: ret void
;
entry:
%X.sroa.0.i = alloca double, align 8
%0 = bitcast double* %X.sroa.0.i to i8*
call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
; Store to the low 32-bits...
%X.sroa.0.0.cast2.i = bitcast double* %X.sroa.0.i to i32*
store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
; Also use a memset to the middle 32-bits for fun.
%X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8, i8* %0, i32 2
call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i1 false)
; Or a memset of the whole thing.
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i1 false)
; Write to the high 32-bits with a memcpy.
%X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8, i8* %0, i32 4
%d.raw = bitcast double* %d to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i1 false)
; Store to the high 32-bits...
%X.sroa.0.4.cast5.i = bitcast i8* %X.sroa.0.4.raw_idx4.i to i32*
store i32 1072693248, i32* %X.sroa.0.4.cast5.i, align 4
; Do the actual math...
%X.sroa.0.0.load1.i = load double, double* %X.sroa.0.i, align 8
%accum.real.i = load double, double* %d, align 8
%add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
store double %add.r.i, double* %d, align 8
call void @llvm.lifetime.end.p0i8(i64 -1, i8* %0)
ret void
}
define i64 @PR14059.2({ float, float }* %phi) {
; Check that SROA can split up alloca-wide integer loads and stores where the
; underlying alloca has smaller components that are accessed independently. This
; shows up particularly with ABI lowering patterns coming out of Clang that rely
; on the particular register placement of a single large integer return value.
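;
; A rough, hypothetical C analogue (not the exact origin of this IR): a small
; two-float aggregate whose value is read back through a single 64-bit integer,
; which lowers to the alloca-wide i64 load over independently stored floats.
;
;   #include <string.h>
;   struct cf { float re, im; };
;   unsigned long long pr14059_2(const struct cf *phi) {
;     struct cf retval = { phi->re, phi->im };  /* two independent float stores */
;     unsigned long long bits;
;     memcpy(&bits, &retval, sizeof bits);      /* alloca-wide integer load     */
;     return bits;
;   }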
; CHECK-LABEL: @PR14059.2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PHI_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[PHI:%.*]], i32 0, i32 0
; CHECK-NEXT: [[PHI_REAL:%.*]] = load float, float* [[PHI_REALP]], align 4
; CHECK-NEXT: [[PHI_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[PHI]], i32 0, i32 1
; CHECK-NEXT: [[PHI_IMAG:%.*]] = load float, float* [[PHI_IMAGP]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[PHI_REAL]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[PHI_IMAG]] to i32
; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_EXT:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_SHIFT:%.*]] = shl i64 [[RETVAL_SROA_3_0_INSERT_EXT]], 32
; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_MASK:%.*]] = and i64 undef, 4294967295
; CHECK-NEXT: [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_3_0_INSERT_MASK]], [[RETVAL_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[RETVAL_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], -4294967296
; CHECK-NEXT: [[RETVAL_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_0_0_INSERT_MASK]], [[RETVAL_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: ret i64 [[RETVAL_SROA_0_0_INSERT_INSERT]]
;
entry:
%retval = alloca { float, float }, align 4
%0 = bitcast { float, float }* %retval to i64*
store i64 0, i64* %0
%phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
%phi.real = load float, float* %phi.realp
%phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
%phi.imag = load float, float* %phi.imagp
%real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
%imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float %phi.real, float* %real
store float %phi.imag, float* %imag
%1 = load i64, i64* %0, align 1
ret i64 %1
}
define void @PR14105({ [16 x i8] }* %ptr) {
; Ensure that when rewriting the GEP index '-1' for this alloca we preserve its
; sign as negative. We use a volatile memcpy to ensure promotion never actually
; occurs.
; CHECK-LABEL: @PR14105(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [16 x i8], align 8
; CHECK-NEXT: [[A_SROA_0_0_CAST1_SROA_IDX:%.*]] = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* [[PTR:%.*]], i64 -1, i32 0, i64 0
; CHECK-NEXT: [[A_SROA_0_0_CAST2_SROA_IDX:%.*]] = getelementptr inbounds [16 x i8], [16 x i8]* [[A_SROA_0]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[A_SROA_0_0_CAST1_SROA_IDX]], i8* align 8 [[A_SROA_0_0_CAST2_SROA_IDX]], i32 16, i1 true)
; CHECK-NEXT: ret void
;
entry:
%a = alloca { [16 x i8] }, align 8
%gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1
%cast1 = bitcast { [16 x i8 ] }* %gep to i8*
%cast2 = bitcast { [16 x i8 ] }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
ret void
}
define void @PR14105_as1({ [16 x i8] } addrspace(1)* %ptr) {
; Make sure the right address-space pointer is used for the type check.
; CHECK-LABEL: @PR14105_as1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca { [16 x i8] }, align 8
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* [[PTR:%.*]], i64 -1
; CHECK-NEXT: [[CAST1:%.*]] = bitcast { [16 x i8] } addrspace(1)* [[GEP]] to i8 addrspace(1)*
; CHECK-NEXT: [[CAST2:%.*]] = bitcast { [16 x i8] }* [[A]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 8 [[CAST1]], i8* align 8 [[CAST2]], i32 16, i1 true)
; CHECK-NEXT: ret void
;
entry:
%a = alloca { [16 x i8] }, align 8
%gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
%cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
%cast2 = bitcast { [16 x i8 ] }* %a to i8*
call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 8 %cast1, i8* align 8 %cast2, i32 16, i1 true)
ret void
}
define void @PR14465() {
; Ensure that we don't crash when analyzing an alloca larger than the maximum
; integer type width (MAX_INT_BITS) supported by LLVM (1048576*32 > (1<<23)-1).
; CHECK-LABEL: @PR14465(
; CHECK-NEXT: [[STACK:%.*]] = alloca [1048576 x i32], align 16
; CHECK-NEXT: [[STACK_0_CAST_SROA_CAST:%.*]] = bitcast [1048576 x i32]* [[STACK]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 16 [[STACK_0_CAST_SROA_CAST]], i8 -2, i64 4194304, i1 false)
; CHECK-NEXT: ret void
;
%stack = alloca [1048576 x i32], align 16
%cast = bitcast [1048576 x i32]* %stack to i8*
call void @llvm.memset.p0i8.i64(i8* align 16 %cast, i8 -2, i64 4194304, i1 false)
ret void
}
define void @PR14548(i1 %x) {
; Handle a mixture of i1 and i8 loads and stores to allocas. This particular
; pattern caused crashes and invalid output in the PR, and its nature will
; trigger a mixture in several permutations as we resolve each alloca
; iteratively.
; Note that we don't do a particularly good *job* of handling these mixtures,
; but the hope is that this is very rare.
; CHECK-LABEL: @PR14548(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i8, align 8
; CHECK-NEXT: [[B_SROA_0:%.*]] = alloca i8, align 8
; CHECK-NEXT: [[B_SROA_0_0_B_I1_SROA_CAST1:%.*]] = bitcast i8* [[B_SROA_0]] to i1*
; CHECK-NEXT: store i1 [[X:%.*]], i1* [[B_SROA_0_0_B_I1_SROA_CAST1]], align 8
; CHECK-NEXT: [[B_SROA_0_0_B_SROA_0_0_FOO:%.*]] = load i8, i8* [[B_SROA_0]], align 8
; CHECK-NEXT: [[B_SROA_0_0_B_SROA_0_0_COPYLOAD:%.*]] = load i8, i8* [[B_SROA_0]], align 8
; CHECK-NEXT: store i8 [[B_SROA_0_0_B_SROA_0_0_COPYLOAD]], i8* [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_BAR:%.*]] = load i8, i8* [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_0_0_A_I1_SROA_CAST2:%.*]] = bitcast i8* [[A_SROA_0]] to i1*
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_BAZ:%.*]] = load i1, i1* [[A_SROA_0_0_A_I1_SROA_CAST2]], align 8
; CHECK-NEXT: ret void
;
entry:
%a = alloca <{ i1 }>, align 8
%b = alloca <{ i1 }>, align 8
%b.i1 = bitcast <{ i1 }>* %b to i1*
store i1 %x, i1* %b.i1, align 8
%b.i8 = bitcast <{ i1 }>* %b to i8*
%foo = load i8, i8* %b.i8, align 1
%a.i8 = bitcast <{ i1 }>* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i1 false) nounwind
%bar = load i8, i8* %a.i8, align 1
%a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
%baz = load i1, i1* %a.i1, align 1
ret void
}
define <3 x i8> @PR14572.1(i32 %x) {
; Ensure that a split integer store which is wider than the type size of the
; alloca (relying on the alloc size padding) doesn't trigger an assert.
; CHECK-LABEL: @PR14572.1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[X:%.*]] to i24
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i24 [[A_0_EXTRACT_TRUNC]] to <3 x i8>
; CHECK-NEXT: [[A_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[X]], 24
; CHECK-NEXT: [[A_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_2_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT: ret <3 x i8> [[TMP0]]
;
entry:
%a = alloca <3 x i8>, align 4
%cast = bitcast <3 x i8>* %a to i32*
store i32 %x, i32* %cast, align 1
%y = load <3 x i8>, <3 x i8>* %a, align 4
ret <3 x i8> %y
}
define i32 @PR14572.2(<3 x i8> %x) {
; Ensure that a split integer load which is wider than the type size of the
; alloca (relying on the alloc size padding) doesn't trigger an assert.
; CHECK-LABEL: @PR14572.2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <3 x i8> [[X:%.*]] to i24
; CHECK-NEXT: [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 undef to i32
; CHECK-NEXT: [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_2_0_INSERT_EXT]], 24
; CHECK-NEXT: [[A_SROA_2_0_INSERT_MASK:%.*]] = and i32 undef, 16777215
; CHECK-NEXT: [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_0_INSERT_EXT:%.*]] = zext i24 [[TMP0]] to i32
; CHECK-NEXT: [[A_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_2_0_INSERT_INSERT]], -16777216
; CHECK-NEXT: [[A_0_INSERT_INSERT:%.*]] = or i32 [[A_0_INSERT_MASK]], [[A_0_INSERT_EXT]]
; CHECK-NEXT: ret i32 [[A_0_INSERT_INSERT]]
;
entry:
%a = alloca <3 x i8>, align 4
store <3 x i8> %x, <3 x i8>* %a, align 1
%cast = bitcast <3 x i8>* %a to i32*
%y = load i32, i32* %cast, align 4
ret i32 %y
}
define i32 @PR14601(i32 %x) {
; Don't try to form a promotable integer alloca when there is a variable length
; memory intrinsic over it.
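;
; A minimal, hypothetical C sketch of the pattern: a runtime-sized memset over a
; local integer (well-defined only when x <= sizeof(int)), which keeps SROA from
; rewriting the alloca as a promotable integer value.
;
;   #include <string.h>
;   int pr14601(int x) {
;     int a;
;     memset(&a, 0, x);   /* variable-length memory intrinsic over the alloca */
;     return a;
;   }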
; CHECK-LABEL: @PR14601(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_0_A_I8_SROA_CAST:%.*]] = bitcast i32* [[A]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[A_0_A_I8_SROA_CAST]], i8 0, i32 [[X:%.*]], i1 false)
; CHECK-NEXT: [[A_0_V:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT: ret i32 [[A_0_V]]
;
entry:
%a = alloca i32
%a.i8 = bitcast i32* %a to i8*
call void @llvm.memset.p0i8.i32(i8* %a.i8, i8 0, i32 %x, i1 false)
%v = load i32, i32* %a
ret i32 %v
}
define void @PR15674(i8* %data, i8* %src, i32 %size) {
; Arrange (via control flow) to have unmerged stores of a particular width to
; an alloca where we incrementally store from the end of the array toward the
; beginning of the array. Ensure that the final integer store, despite being
; convertible to the integer type that we end up promoting this alloca toward,
; doesn't get widened to a full alloca store.
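;
; A rough, hypothetical C analogue of the control flow: a switch that falls
; through from the highest byte down to byte 0, filling the buffer from the end
; toward the beginning before copying it out.
;
;   #include <string.h>
;   void pr15674(char *data, const char *src, int size) {
;     char tmp[4];
;     switch (size) {
;     case 4: tmp[3] = src[3]; /* fall through */
;     case 3: tmp[2] = src[2]; /* fall through */
;     case 2: tmp[1] = src[1]; /* fall through */
;     case 1: tmp[0] = src[0];
;     }
;     memcpy(data, tmp, size);
;   }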
; CHECK-LABEL: @PR15674(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: switch i32 [[SIZE:%.*]], label [[END:%.*]] [
; CHECK-NEXT: i32 4, label [[BB4:%.*]]
; CHECK-NEXT: i32 3, label [[BB3:%.*]]
; CHECK-NEXT: i32 2, label [[BB2:%.*]]
; CHECK-NEXT: i32 1, label [[BB1:%.*]]
; CHECK-NEXT: ]
; CHECK: bb4:
; CHECK-NEXT: [[SRC_GEP3:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i32 3
; CHECK-NEXT: [[SRC_3:%.*]] = load i8, i8* [[SRC_GEP3]], align 1
; CHECK-NEXT: [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_CAST7:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
; CHECK-NEXT: [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_IDX8:%.*]] = getelementptr inbounds i8, i8* [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_CAST7]], i64 3
; CHECK-NEXT: store i8 [[SRC_3]], i8* [[TMP_SROA_0_3_TMP_GEP3_SROA_RAW_IDX8]], align 1
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[SRC_GEP2:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i32 2
; CHECK-NEXT: [[SRC_2:%.*]] = load i8, i8* [[SRC_GEP2]], align 1
; CHECK-NEXT: [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_CAST5:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
; CHECK-NEXT: [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_IDX6:%.*]] = getelementptr inbounds i8, i8* [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_CAST5]], i64 2
; CHECK-NEXT: store i8 [[SRC_2]], i8* [[TMP_SROA_0_2_TMP_GEP2_SROA_RAW_IDX6]], align 2
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
; CHECK-NEXT: [[SRC_GEP1:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i32 1
; CHECK-NEXT: [[SRC_1:%.*]] = load i8, i8* [[SRC_GEP1]], align 1
; CHECK-NEXT: [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_CAST3:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
; CHECK-NEXT: [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_IDX4:%.*]] = getelementptr inbounds i8, i8* [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_CAST3]], i64 1
; CHECK-NEXT: store i8 [[SRC_1]], i8* [[TMP_SROA_0_1_TMP_GEP1_SROA_RAW_IDX4]], align 1
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb1:
; CHECK-NEXT: [[SRC_GEP0:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i32 0
; CHECK-NEXT: [[SRC_0:%.*]] = load i8, i8* [[SRC_GEP0]], align 1
; CHECK-NEXT: [[TMP_SROA_0_0_TMP_GEP0_SROA_CAST2:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
; CHECK-NEXT: store i8 [[SRC_0]], i8* [[TMP_SROA_0_0_TMP_GEP0_SROA_CAST2]], align 4
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[TMP_SROA_0_0_TMP_RAW_SROA_CAST1:%.*]] = bitcast i32* [[TMP_SROA_0]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DATA:%.*]], i8* align 4 [[TMP_SROA_0_0_TMP_RAW_SROA_CAST1]], i32 [[SIZE]], i1 false)
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca [4 x i8], align 1
switch i32 %size, label %end [
i32 4, label %bb4
i32 3, label %bb3
i32 2, label %bb2
i32 1, label %bb1
]
bb4:
%src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
%src.3 = load i8, i8* %src.gep3
%tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
store i8 %src.3, i8* %tmp.gep3
br label %bb3
bb3:
%src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
%src.2 = load i8, i8* %src.gep2
%tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
store i8 %src.2, i8* %tmp.gep2
br label %bb2
bb2:
%src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
%src.1 = load i8, i8* %src.gep1
%tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
store i8 %src.1, i8* %tmp.gep1
br label %bb1
bb1:
%src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
%src.0 = load i8, i8* %src.gep0
%tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
store i8 %src.0, i8* %tmp.gep0
br label %end
end:
%tmp.raw = bitcast [4 x i8]* %tmp to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %data, i8* %tmp.raw, i32 %size, i1 false)
ret void
}
define void @PR15805(i1 %a, i1 %b) {
; CHECK-LABEL: @PR15805(
; CHECK-NEXT: [[COND_SROA_SPECULATED:%.*]] = select i1 undef, i64 undef, i64 undef
; CHECK-NEXT: ret void
;
%c = alloca i64, align 8
%p.0.c = select i1 undef, i64* %c, i64* %c
%cond.in = select i1 undef, i64* %p.0.c, i64* %c
%cond = load i64, i64* %cond.in, align 8
ret void
}
define void @PR15805.1(i1 %a, i1 %b) {
; Same as the normal PR15805, but rigged to place the use before the def inside
; of looping unreachable code. This helps ensure that we aren't sensitive to the
; order in which the uses of the alloca are visited.
;
; CHECK-LABEL: @PR15805.1(
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[COND_SROA_SPECULATED:%.*]] = select i1 undef, i64 undef, i64 undef
; CHECK-NEXT: br i1 undef, label [[LOOP:%.*]], label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
%c = alloca i64, align 8
br label %exit
loop:
%cond.in = select i1 undef, i64* %c, i64* %p.0.c
%p.0.c = select i1 undef, i64* %c, i64* %c
%cond = load i64, i64* %cond.in, align 8
br i1 undef, label %loop, label %exit
exit:
ret void
}
define void @PR16651.1(i8* %a) {
; This test case caused a crash due to the volatile memcpy in combination with
; lowering to integer loads and stores of a width other than that of the original
; memcpy.
;
; CHECK-LABEL: @PR16651.1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_SROA_0:%.*]] = alloca i16, align 4
; CHECK-NEXT: [[B_SROA_1:%.*]] = alloca i8, align 2
; CHECK-NEXT: [[B_SROA_2:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B_SROA_0_0_A_SROA_CAST:%.*]] = bitcast i8* [[A:%.*]] to i16*
; CHECK-NEXT: [[B_SROA_0_0_COPYLOAD:%.*]] = load volatile i16, i16* [[B_SROA_0_0_A_SROA_CAST]], align 4
; CHECK-NEXT: store volatile i16 [[B_SROA_0_0_COPYLOAD]], i16* [[B_SROA_0]], align 4
; CHECK-NEXT: [[B_SROA_1_0_A_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 2
; CHECK-NEXT: [[B_SROA_1_0_COPYLOAD:%.*]] = load volatile i8, i8* [[B_SROA_1_0_A_SROA_RAW_IDX]], align 2
; CHECK-NEXT: store volatile i8 [[B_SROA_1_0_COPYLOAD]], i8* [[B_SROA_1]], align 2
; CHECK-NEXT: [[B_SROA_2_0_A_SROA_RAW_IDX:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 3
; CHECK-NEXT: [[B_SROA_2_0_COPYLOAD:%.*]] = load volatile i8, i8* [[B_SROA_2_0_A_SROA_RAW_IDX]], align 1
; CHECK-NEXT: store volatile i8 [[B_SROA_2_0_COPYLOAD]], i8* [[B_SROA_2]], align 1
; CHECK-NEXT: [[B_SROA_1_0_B_SROA_1_2_:%.*]] = load i8, i8* [[B_SROA_1]], align 2
; CHECK-NEXT: unreachable
;
entry:
%b = alloca i32, align 4
%b.cast = bitcast i32* %b to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %b.cast, i8* align 4 %a, i32 4, i1 true)
%b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
load i8, i8* %b.gep, align 2
unreachable
}
define void @PR16651.2() {
; This test case caused a crash due to failing to promote given a select that
; can't be speculated. It shouldn't be promoted, but we missed that fact when
; analyzing whether we could form a vector promotion because that code didn't
; bail on select instructions.
;
; CHECK-LABEL: @PR16651.2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TV1_SROA_0:%.*]] = alloca <2 x float>, align 8
; CHECK-NEXT: store <2 x float> undef, <2 x float>* [[TV1_SROA_0]], align 8
; CHECK-NEXT: [[TV1_SROA_0_0__SROA_IDX:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TV1_SROA_0]], i64 0, i32 0
; CHECK-NEXT: [[COND105_IN_I_I:%.*]] = select i1 undef, float* null, float* [[TV1_SROA_0_0__SROA_IDX]]
; CHECK-NEXT: [[COND105_I_I:%.*]] = load float, float* [[COND105_IN_I_I]], align 8
; CHECK-NEXT: ret void
;
entry:
%tv1 = alloca { <2 x float>, <2 x float> }, align 8
%0 = getelementptr { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
store <2 x float> undef, <2 x float>* %0, align 8
%1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
%cond105.in.i.i = select i1 undef, float* null, float* %1
%cond105.i.i = load float, float* %cond105.in.i.i, align 8
ret void
}
define void @test23(i32 %x) {
; CHECK-LABEL: @test23(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret void
;
entry:
%a = alloca i32, align 4
store i32 %x, i32* %a, align 4
%gep1 = getelementptr inbounds i32, i32* %a, i32 1
%gep0 = getelementptr inbounds i32, i32* %a, i32 0
%cast1 = bitcast i32* %gep1 to i8*
%cast0 = bitcast i32* %gep0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i1 false)
ret void
}
define void @PR18615() {
; CHECK-LABEL: @PR18615(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret void
;
entry:
%f = alloca i8
%gep = getelementptr i8, i8* %f, i64 -1
call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i1 false)
ret void
}
define void @test24(i8* %src, i8* %dst) {
; CHECK-LABEL: @test24(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i64, align 16
; CHECK-NEXT: [[A_0_SRC_SROA_CAST:%.*]] = bitcast i8* [[SRC:%.*]] to i64*
; CHECK-NEXT: [[A_0_COPYLOAD:%.*]] = load volatile i64, i64* [[A_0_SRC_SROA_CAST]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: store volatile i64 [[A_0_COPYLOAD]], i64* [[A]], align 16, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_0_DST_SROA_CAST:%.*]] = bitcast i8* [[DST:%.*]] to i64*
; CHECK-NEXT: [[A_0_COPYLOAD1:%.*]] = load volatile i64, i64* [[A]], align 16, !tbaa [[TBAA3]]
; CHECK-NEXT: store volatile i64 [[A_0_COPYLOAD1]], i64* [[A_0_DST_SROA_CAST]], align 1, !tbaa [[TBAA3]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca i64, align 16
%ptr = bitcast i64* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i1 true), !tbaa !0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i1 true), !tbaa !3
ret void
}
define float @test25() {
; Check that we split up stores in order to promote the smaller SSA values. These
; types of patterns can arise because LLVM maps small memcpys to integer loads and
; stores. If we get a memcpy of an aggregate (such as C and C++ frontends would
; produce, but so might any language frontend), this will in many cases turn into
; an integer load and store. SROA needs to be extremely powerful to correctly
; handle these cases and form splittable and promotable SSA values.
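;
; A minimal, hypothetical C sketch of the kind of source involved: a small
; two-float aggregate copied by value, which the frontend lowers to an i64
; load and store that SROA must split back into the float pieces.
;
;   struct pair { float a, b; };
;   float test25_sketch(void) {
;     struct pair x = { 0.0f, 1.0f };
;     struct pair y = x;          /* becomes an i64 load and store */
;     return y.a + y.b;
;   }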
;
; CHECK-LABEL: @test25(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 0 to float
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 1065353216 to float
; CHECK-NEXT: [[RET:%.*]] = fadd float [[TMP0]], [[TMP1]]
; CHECK-NEXT: ret float [[RET]]
;
entry:
%a = alloca i64
%b = alloca i64
%a.cast = bitcast i64* %a to [2 x float]*
%a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
%a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
%b.cast = bitcast i64* %b to [2 x float]*
%b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
%b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
store float 0.0, float* %a.gep1
store float 1.0, float* %a.gep2
%v = load i64, i64* %a
store i64 %v, i64* %b
%f1 = load float, float* %b.gep1
%f2 = load float, float* %b.gep2
%ret = fadd float %f1, %f2
ret float %ret
}
@complex1 = external global [2 x float]
@complex2 = external global [2 x float]
define void @test26() {
; Test a case of splitting up loads and stores against globals.
;
; CHECK-LABEL: @test26(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[V14:%.*]] = load i32, i32* bitcast ([2 x float]* @complex1 to i32*), align 4
; CHECK-NEXT: [[V16:%.*]] = load i32, i32* bitcast (float* getelementptr inbounds ([2 x float], [2 x float]* @complex1, i64 0, i64 1) to i32*), align 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 [[V14]] to float
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[V16]] to float
; CHECK-NEXT: [[SUM:%.*]] = fadd float [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[SUM]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[SUM]] to i32
; CHECK-NEXT: store i32 [[TMP2]], i32* bitcast ([2 x float]* @complex2 to i32*), align 4
; CHECK-NEXT: store i32 [[TMP3]], i32* bitcast (float* getelementptr inbounds ([2 x float], [2 x float]* @complex2, i64 0, i64 1) to i32*), align 4
; CHECK-NEXT: ret void
;
entry:
%a = alloca i64
%a.cast = bitcast i64* %a to [2 x float]*
%a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
%a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
%v1 = load i64, i64* bitcast ([2 x float]* @complex1 to i64*)
store i64 %v1, i64* %a
%f1 = load float, float* %a.gep1
%f2 = load float, float* %a.gep2
%sum = fadd float %f1, %f2
store float %sum, float* %a.gep1
store float %sum, float* %a.gep2
%v2 = load i64, i64* %a
store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
ret void
}
define float @test27() {
; Another, more complex case of splittable i64 loads and stores. This example
; is a particularly challenging one because the load and store both point into
; the alloca SROA is processing, and they overlap but at an offset.
;
; CHECK-LABEL: @test27(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32 0 to float
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 1065353216 to float
; CHECK-NEXT: [[RET:%.*]] = fadd float [[TMP0]], [[TMP1]]
; CHECK-NEXT: ret float [[RET]]
;
entry:
%a = alloca [12 x i8]
%gep1 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 0
%gep2 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 4
%gep3 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 8
%iptr1 = bitcast i8* %gep1 to i64*
%iptr2 = bitcast i8* %gep2 to i64*
%fptr1 = bitcast i8* %gep1 to float*
%fptr2 = bitcast i8* %gep2 to float*
%fptr3 = bitcast i8* %gep3 to float*
store float 0.0, float* %fptr1
store float 1.0, float* %fptr2
%v = load i64, i64* %iptr1
store i64 %v, i64* %iptr2
%f1 = load float, float* %fptr2
%f2 = load float, float* %fptr3
%ret = fadd float %f1, %f2
ret float %ret
}
define i32 @PR22093() {
; Test that we don't try to pre-split a splittable store of a splittable but
; not pre-splittable load over the same alloca. We "handle" this case when the
; load is unsplittable but unrelated to this alloca by just generating extra
; loads without touching the original, but when the original load was out of
; this alloca we need to handle it specially to ensure the splits line up
; properly for rewriting.
;
; CHECK-LABEL: @PR22093(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i16, align 4
; CHECK-NEXT: store volatile i16 42, i16* [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_LOAD:%.*]] = load i16, i16* [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i16 undef to i32
; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_3_0_INSERT_EXT]], 16
; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i32 undef, 65535
; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[A_SROA_0_0_A_SROA_0_0_LOAD]] to i32
; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_3_0_INSERT_INSERT]], -65536
; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: [[A_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_0_INSERT_INSERT]] to i16
; CHECK-NEXT: store i16 [[A_SROA_0_0_EXTRACT_TRUNC]], i16* [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[A_SROA_0_0_INSERT_INSERT]], 16
; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_3_0_EXTRACT_SHIFT]] to i16
; CHECK-NEXT: ret i32 [[A_SROA_0_0_INSERT_INSERT]]
;
entry:
%a = alloca i32
%a.cast = bitcast i32* %a to i16*
store volatile i16 42, i16* %a.cast
%load = load i32, i32* %a
store i32 %load, i32* %a
ret i32 %load
}
define void @PR22093.2() {
; Another way that we can end up unable to split a particular set of loads and
; stores, one where the ordering even matters. Here we have a load which is
; pre-splittable by itself, and the first store is also compatible. But the
; second store of the load makes the load unsplittable because of a mismatch of
; splits. Because this makes the load unsplittable, we also have to go back and
; remove the first store from the presplit candidates as its load won't be
; presplit.
;
; CHECK-LABEL: @PR22093.2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i16, align 8
; CHECK-NEXT: [[A_SROA_31:%.*]] = alloca i8, align 4
; CHECK-NEXT: store volatile i16 42, i16* [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_LOAD:%.*]] = load i16, i16* [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i16 undef to i32
; CHECK-NEXT: [[A_SROA_3_0_INSERT_SHIFT:%.*]] = shl i32 [[A_SROA_3_0_INSERT_EXT]], 16
; CHECK-NEXT: [[A_SROA_3_0_INSERT_MASK:%.*]] = and i32 undef, 65535
; CHECK-NEXT: [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[A_SROA_0_0_A_SROA_0_0_LOAD]] to i32
; CHECK-NEXT: [[A_SROA_0_0_INSERT_MASK:%.*]] = and i32 [[A_SROA_3_0_INSERT_INSERT]], -65536
; CHECK-NEXT: [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i32 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: [[A_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_0_INSERT_INSERT]] to i16
; CHECK-NEXT: store i16 [[A_SROA_0_0_EXTRACT_TRUNC]], i16* [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[A_SROA_0_0_INSERT_INSERT]], 16
; CHECK-NEXT: [[A_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_3_0_EXTRACT_SHIFT]] to i16
; CHECK-NEXT: store volatile i8 13, i8* [[A_SROA_31]], align 4
; CHECK-NEXT: [[A_SROA_31_4_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_0_0_INSERT_INSERT]] to i8
; CHECK-NEXT: store i8 [[A_SROA_31_4_EXTRACT_TRUNC]], i8* [[A_SROA_31]], align 4
; CHECK-NEXT: [[A_SROA_5_4_EXTRACT_SHIFT:%.*]] = lshr i32 [[A_SROA_0_0_INSERT_INSERT]], 8
; CHECK-NEXT: [[A_SROA_5_4_EXTRACT_TRUNC:%.*]] = trunc i32 [[A_SROA_5_4_EXTRACT_SHIFT]] to i24
; CHECK-NEXT: ret void
;
entry:
%a = alloca i64
%a.cast1 = bitcast i64* %a to i32*
%a.cast2 = bitcast i64* %a to i16*
store volatile i16 42, i16* %a.cast2
%load = load i32, i32* %a.cast1
store i32 %load, i32* %a.cast1
%a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
%a.cast3 = bitcast i32* %a.gep1 to i8*
store volatile i8 13, i8* %a.cast3
store i32 %load, i32* %a.gep1
ret void
}
define void @PR23737() {
; CHECK-LABEL: @PR23737(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTR:%.*]] = alloca i64, align 8
; CHECK-NEXT: store atomic volatile i64 0, i64* [[PTR]] seq_cst, align 8
; CHECK-NEXT: [[PTR_0_LOAD:%.*]] = load atomic volatile i64, i64* [[PTR]] seq_cst, align 8
; CHECK-NEXT: ret void
;
entry:
%ptr = alloca i64, align 8
store atomic volatile i64 0, i64* %ptr seq_cst, align 8
%load = load atomic volatile i64, i64* %ptr seq_cst, align 8
ret void
}
define i16 @PR24463() {
; Ensure we can handle a very interesting case where there is an integer-based
; rewrite of the uses of the alloca, but where one of the integers in that is
; a sub-integer that requires extraction *and* extends past the end of the
; alloca. SROA can split the alloca to avoid shift or trunc.
;
; CHECK-LABEL: @PR24463(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ALLOCA_SROA_1_2_LOAD_EXT:%.*]] = zext i8 0 to i16
; CHECK-NEXT: ret i16 [[ALLOCA_SROA_1_2_LOAD_EXT]]
;
entry:
%alloca = alloca [3 x i8]
%gep1 = getelementptr inbounds [3 x i8], [3 x i8]* %alloca, i64 0, i64 1
%bc1 = bitcast i8* %gep1 to i16*
store i16 0, i16* %bc1
%gep2 = getelementptr inbounds [3 x i8], [3 x i8]* %alloca, i64 0, i64 2
%bc2 = bitcast i8* %gep2 to i16*
%load = load i16, i16* %bc2
ret i16 %load
}
%struct.STest = type { %struct.SPos, %struct.SPos }
%struct.SPos = type { float, float }
define void @PR25873(%struct.STest* %outData) {
; CHECK-LABEL: @PR25873(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST:%.*]] = bitcast %struct.STest* [[OUTDATA:%.*]] to i32*
; CHECK-NEXT: store i32 1123418112, i32* [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST]], align 4
; CHECK-NEXT: [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STEST:%.*]], %struct.STest* [[OUTDATA]], i64 0, i32 0, i32 1
; CHECK-NEXT: [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST16:%.*]] = bitcast float* [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_IDX]] to i32*
; CHECK-NEXT: store i32 1139015680, i32* [[TMPDATA_SROA_0_0__SROA_CAST1_SROA_CAST16]], align 4
; CHECK-NEXT: [[TMPDATA_SROA_6_0__SROA_IDX3:%.*]] = getelementptr inbounds [[STRUCT_STEST]], %struct.STest* [[OUTDATA]], i64 0, i32 1
; CHECK-NEXT: [[TMPDATA_SROA_6_0__SROA_CAST4:%.*]] = bitcast %struct.SPos* [[TMPDATA_SROA_6_0__SROA_IDX3]] to i64*
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_EXT:%.*]] = zext i32 1139015680 to i64
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_SHIFT:%.*]] = shl i64 [[TMPDATA_SROA_6_SROA_4_0_INSERT_EXT]], 32
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_MASK:%.*]] = and i64 undef, 4294967295
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_4_0_INSERT_INSERT:%.*]] = or i64 [[TMPDATA_SROA_6_SROA_4_0_INSERT_MASK]], [[TMPDATA_SROA_6_SROA_4_0_INSERT_SHIFT]]
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_0_0_INSERT_EXT:%.*]] = zext i32 1123418112 to i64
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[TMPDATA_SROA_6_SROA_4_0_INSERT_INSERT]], -4294967296
; CHECK-NEXT: [[TMPDATA_SROA_6_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[TMPDATA_SROA_6_SROA_0_0_INSERT_MASK]], [[TMPDATA_SROA_6_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: store i64 [[TMPDATA_SROA_6_SROA_0_0_INSERT_INSERT]], i64* [[TMPDATA_SROA_6_0__SROA_CAST4]], align 4
; CHECK-NEXT: ret void
;
entry:
%tmpData = alloca %struct.STest, align 8
%0 = bitcast %struct.STest* %tmpData to i8*
call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
%x = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 0
store float 1.230000e+02, float* %x, align 8
%y = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 0, i32 1
store float 4.560000e+02, float* %y, align 4
%m_posB = getelementptr inbounds %struct.STest, %struct.STest* %tmpData, i64 0, i32 1
%1 = bitcast %struct.STest* %tmpData to i64*
%2 = bitcast %struct.SPos* %m_posB to i64*
%3 = load i64, i64* %1, align 8
store i64 %3, i64* %2, align 8
%4 = bitcast %struct.STest* %outData to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %0, i64 16, i1 false)
call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
define void @PR27999() unnamed_addr {
; CHECK-LABEL: @PR27999(
; CHECK-NEXT: entry-block:
; CHECK-NEXT: ret void
;
entry-block:
%0 = alloca [2 x i64], align 8
%1 = bitcast [2 x i64]* %0 to i8*
call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
%2 = getelementptr inbounds [2 x i64], [2 x i64]* %0, i32 0, i32 1
%3 = bitcast i64* %2 to i8*
call void @llvm.lifetime.end.p0i8(i64 8, i8* %3)
ret void
}
define void @PR29139() {
; CHECK-LABEL: @PR29139(
; CHECK-NEXT: bb1:
; CHECK-NEXT: ret void
;
bb1:
%e.7.sroa.6.i = alloca i32, align 1
%e.7.sroa.6.0.load81.i = load i32, i32* %e.7.sroa.6.i, align 1
%0 = bitcast i32* %e.7.sroa.6.i to i8*
call void @llvm.lifetime.end.p0i8(i64 2, i8* %0)
ret void
}
; PR35657 reports assertion failure with this code
define void @PR35657(i64 %v) {
; CHECK-LABEL: @PR35657(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A48_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[V:%.*]] to i16
; CHECK-NEXT: [[A48_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i64 [[V]], 16
; CHECK-NEXT: [[A48_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[A48_SROA_2_0_EXTRACT_SHIFT]] to i48
; CHECK-NEXT: call void @callee16(i16 [[A48_SROA_0_0_EXTRACT_TRUNC]])
; CHECK-NEXT: call void @callee48(i48 [[A48_SROA_2_0_EXTRACT_TRUNC]])
; CHECK-NEXT: ret void
;
entry:
%a48 = alloca i48
%a48.cast64 = bitcast i48* %a48 to i64*
store i64 %v, i64* %a48.cast64
%a48.cast16 = bitcast i48* %a48 to i16*
%b0_15 = load i16, i16* %a48.cast16
%a48.cast8 = bitcast i48* %a48 to i8*
%a48_offset2 = getelementptr inbounds i8, i8* %a48.cast8, i64 2
%a48_offset2.cast48 = bitcast i8* %a48_offset2 to i48*
%b16_63 = load i48, i48* %a48_offset2.cast48, align 2
call void @callee16(i16 %b0_15)
call void @callee48(i48 %b16_63)
ret void
}
declare void @callee16(i16 %a)
declare void @callee48(i48 %a)
define void @test28(i64 %v) #0 {
; SROA should split the first i64 store to avoid additional and/or instructions
; when storing into the i32 fields.
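;
; A rough, hypothetical C analogue: an 8-byte write that spans both i32 fields,
; followed by a use of the first one; splitting the i64 store lets each half be
; promoted without masking and shifting.
;
;   #include <string.h>
;   struct t28 { long long a; int b; int c; };
;   void test28_sketch(long long v) {
;     struct t28 t;
;     memcpy(&t.b, &v, 8);   /* i64 store covering both i32 fields */
;     t.c = t.b;
;   }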
; CHECK-LABEL: @test28(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T_SROA_0_8_EXTRACT_TRUNC:%.*]] = trunc i64 [[V:%.*]] to i32
; CHECK-NEXT: [[T_SROA_2_8_EXTRACT_SHIFT:%.*]] = lshr i64 [[V]], 32
; CHECK-NEXT: [[T_SROA_2_8_EXTRACT_TRUNC:%.*]] = trunc i64 [[T_SROA_2_8_EXTRACT_SHIFT]] to i32
; CHECK-NEXT: ret void
;
entry:
%t = alloca { i64, i32, i32 }
%b = getelementptr { i64, i32, i32 }, { i64, i32, i32 }* %t, i32 0, i32 1
%0 = bitcast i32* %b to i64*
store i64 %v, i64* %0
%1 = load i32, i32* %b
%c = getelementptr { i64, i32, i32 }, { i64, i32, i32 }* %t, i32 0, i32 2
store i32 %1, i32* %c
ret void
}
declare void @llvm.lifetime.start.isVoid.i64.p0i8(i64, [10 x float]* nocapture)
declare void @llvm.lifetime.end.isVoid.i64.p0i8(i64, [10 x float]* nocapture)
@array = dso_local global [10 x float] undef, align 4
define void @test29(i32 %num, i32 %tid) {
; CHECK-LABEL: @test29(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[NUM:%.*]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[BB1:%.*]], label [[BB7:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TID:%.*]], 0
; CHECK-NEXT: [[CONV_I:%.*]] = zext i32 [[TID]] to i64
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @array, i64 0, i64 [[CONV_I]]
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[ARRAYIDX5]] to i32*
; CHECK-NEXT: br label [[BB2:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[I_02:%.*]] = phi i32 [ [[NUM]], [[BB1]] ], [ [[SUB:%.*]], [[BB5:%.*]] ]
; CHECK-NEXT: br i1 [[TOBOOL]], label [[BB3:%.*]], label [[BB4:%.*]]
; CHECK: bb3:
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: store i32 undef, i32* [[TMP0]], align 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb5:
; CHECK-NEXT: [[SUB]] = add i32 [[I_02]], -1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[SUB]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[BB2]], label [[BB6:%.*]]
; CHECK: bb6:
; CHECK-NEXT: br label [[BB7]]
; CHECK: bb7:
; CHECK-NEXT: ret void
;
entry:
%ra = alloca [10 x float], align 4
call void @llvm.lifetime.start.isVoid.i64.p0i8(i64 40, [10 x float]* nonnull %ra)
%cmp1 = icmp sgt i32 %num, 0
br i1 %cmp1, label %bb1, label %bb7
bb1:
%tobool = icmp eq i32 %tid, 0
%conv.i = zext i32 %tid to i64
%0 = bitcast [10 x float]* %ra to i32*
%1 = load i32, i32* %0, align 4
%arrayidx5 = getelementptr inbounds [10 x float], [10 x float]* @array, i64 0, i64 %conv.i
%2 = bitcast float* %arrayidx5 to i32*
br label %bb2
bb2:
%i.02 = phi i32 [ %num, %bb1 ], [ %sub, %bb5 ]
br i1 %tobool, label %bb3, label %bb4
bb3:
br label %bb5
bb4:
store i32 %1, i32* %2, align 4
br label %bb5
bb5:
%sub = add i32 %i.02, -1
%cmp = icmp sgt i32 %sub, 0
br i1 %cmp, label %bb2, label %bb6
bb6:
br label %bb7
bb7:
call void @llvm.lifetime.end.isVoid.i64.p0i8(i64 40, [10 x float]* nonnull %ra)
ret void
}
!0 = !{!1, !1, i64 0, i64 200}
!1 = !{!2, i64 1, !"type_0"}
!2 = !{!"root"}
!3 = !{!4, !4, i64 0, i64 1}
!4 = !{!2, i64 1, !"type_3"}
!5 = !{!6, !6, i64 0, i64 1}
!6 = !{!2, i64 1, !"type_5"}
!7 = !{!8, !8, i64 0, i64 1}
!8 = !{!2, i64 1, !"type_7"}
!9 = !{!10, !10, i64 0, i64 1}
!10 = !{!2, i64 1, !"type_9"}
!11 = !{!12, !12, i64 0, i64 1}
!12 = !{!2, i64 1, !"type_11"}
!13 = !{!14, !14, i64 0, i64 1}
!14 = !{!2, i64 1, !"type_13"}
!15 = !{!16, !16, i64 0, i64 1}
!16 = !{!2, i64 1, !"type_15"}
!17 = !{!18, !18, i64 0, i64 1}
!18 = !{!2, i64 1, !"type_17"}
!19 = !{!20, !20, i64 0, i64 1}
!20 = !{!2, i64 1, !"type_19"}
!21 = !{!22, !22, i64 0, i64 1}
!22 = !{!2, i64 1, !"type_21"}
!23 = !{!24, !24, i64 0, i64 1}
!24 = !{!2, i64 1, !"type_23"}
!25 = !{!26, !26, i64 0, i64 1}
!26 = !{!2, i64 1, !"type_25"}
!27 = !{!28, !28, i64 0, i64 1}
!28 = !{!2, i64 1, !"type_27"}
!29 = !{!30, !30, i64 0, i64 1}
!30 = !{!2, i64 1, !"type_29"}
!31 = !{!32, !32, i64 0, i64 1}
!32 = !{!2, i64 1, !"type_31"}
!33 = !{!34, !34, i64 0, i64 1}
!34 = !{!2, i64 1, !"type_33"}
!35 = !{!36, !36, i64 0, i64 1}
!36 = !{!2, i64 1, !"type_35"}
!37 = !{!38, !38, i64 0, i64 1}
!38 = !{!2, i64 1, !"type_37"}
!39 = !{!40, !40, i64 0, i64 1}
!40 = !{!2, i64 1, !"type_39"}
!41 = !{!42, !42, i64 0, i64 1}
!42 = !{!2, i64 1, !"type_41"}
!43 = !{!44, !44, i64 0, i64 1}
!44 = !{!2, i64 1, !"type_43"}
!45 = !{!46, !46, i64 0, i64 1}
!46 = !{!2, i64 1, !"type_45"}
!47 = !{!48, !48, i64 0, i64 1}
!48 = !{!2, i64 1, !"type_47"}
!49 = !{!50, !50, i64 0, i64 1}
!50 = !{!2, i64 1, !"type_49"}
!51 = !{!52, !52, i64 0, i64 1}
!52 = !{!2, i64 1, !"type_51"}
!53 = !{!54, !54, i64 0, i64 1}
!54 = !{!2, i64 1, !"type_53"}
!55 = !{!56, !56, i64 0, i64 1}
!56 = !{!2, i64 1, !"type_55"}
!57 = !{!58, !58, i64 0, i64 1}
!58 = !{!2, i64 1, !"type_57"}
!59 = !{!60, !60, i64 0, i64 1}
!60 = !{!2, i64 1, !"type_59"}