; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -S -msan-check-access-address=0 -passes="msan" 2>&1 | FileCheck %s
; RUN: opt < %s -S -msan-check-access-address=0 -passes="msan" -msan-track-origins=2 2>&1 | FileCheck %s --check-prefixes=ORIGIN
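; Test MemorySanitizer instrumentation of loads, stores, arithmetic, function
; boundaries, and allocas involving scalable vector types.
;
; Shadow addresses use the x86_64 Linux mapping (address XOR 0x500000000000 =
; 87960930222080); with -msan-track-origins, the origin address is the shadow
; address plus 0x100000000000 = 17592186044416.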

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

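; A plain scalable-vector load/store: the shadow is a <vscale x 4 x i32>
; loaded from and stored to the corresponding shadow addresses. With origin
; tracking, the origin is written per 4-byte chunk in a loop whose trip count
; is derived from llvm.vscale, since the vector size is not known statically.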
define void @test_load_store_i32(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_i32(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[A]], align 16
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP7]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[B]], align 16
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_load_store_i32(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[A]], align 16
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; ORIGIN-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGIN-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
; ORIGIN-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT: [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
; ORIGIN-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; ORIGIN-NEXT: store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP10]], align 16
; ORIGIN-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[_MSLD]])
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP13]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP21:%.*]], !prof [[PROF1:![0-9]+]]
; ORIGIN: 14:
; ORIGIN-NEXT: [[TMP15:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP7]])
; ORIGIN-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 16
; ORIGIN-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 3
; ORIGIN-NEXT: [[TMP19:%.*]] = udiv i64 [[TMP18]], 4
; ORIGIN-NEXT: br label [[DOTSPLIT:%.*]]
; ORIGIN: .split:
; ORIGIN-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP14]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[IV]]
; ORIGIN-NEXT: store i32 [[TMP15]], ptr [[TMP20]], align 4
; ORIGIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP19]]
; ORIGIN-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN: .split.split:
; ORIGIN-NEXT: br label [[TMP21]]
; ORIGIN: 21:
; ORIGIN-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[B]], align 16
; ORIGIN-NEXT: ret void
;
  %1 = load <vscale x 4 x i32>, ptr %a
  store <vscale x 4 x i32> %1, ptr %b
  ret void
}

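; Shadow propagation through an integer add: the operand shadows are OR'ed
; together (_MSPROP). Note that the add result itself is left unused; the
; store below writes back the loaded value %2, so the stored shadow is the
; second loaded shadow (_MSLD1).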
define void @test_load_store_add_int(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_add_int(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 8 x i64>, ptr [[A]], align 64
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 64
; CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 8 x i64>, ptr [[B]], align 64
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP8]], align 64
; CHECK-NEXT: [[_MSPROP:%.*]] = or <vscale x 8 x i64> [[_MSLD]], [[_MSLD1]]
; CHECK-NEXT: [[TMP9:%.*]] = add <vscale x 8 x i64> [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 87960930222080
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: store <vscale x 8 x i64> [[_MSLD1]], ptr [[TMP12]], align 64
; CHECK-NEXT: store <vscale x 8 x i64> [[TMP5]], ptr [[B]], align 64
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_load_store_add_int(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP1:%.*]] = load <vscale x 8 x i64>, ptr [[A]], align 64
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: [[_MSLD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 64
; ORIGIN-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 64
; ORIGIN-NEXT: [[TMP8:%.*]] = load <vscale x 8 x i64>, ptr [[B]], align 64
; ORIGIN-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; ORIGIN-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; ORIGIN-NEXT: [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416
; ORIGIN-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
; ORIGIN-NEXT: [[_MSLD1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP11]], align 64
; ORIGIN-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 64
; ORIGIN-NEXT: [[_MSPROP:%.*]] = or <vscale x 8 x i64> [[_MSLD]], [[_MSLD1]]
; ORIGIN-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.or.nxv8i64(<vscale x 8 x i64> [[_MSLD1]])
; ORIGIN-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
; ORIGIN-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP14]], i32 [[TMP7]]
; ORIGIN-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i64> [[TMP1]], [[TMP8]]
; ORIGIN-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
; ORIGIN-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
; ORIGIN-NEXT: [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
; ORIGIN-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; ORIGIN-NEXT: store <vscale x 8 x i64> [[_MSLD1]], ptr [[TMP21]], align 64
; ORIGIN-NEXT: [[TMP24:%.*]] = call i64 @llvm.vector.reduce.or.nxv8i64(<vscale x 8 x i64> [[_MSLD1]])
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP24]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP25:%.*]], label [[TMP32:%.*]], !prof [[PROF1]]
; ORIGIN: 25:
; ORIGIN-NEXT: [[TMP26:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP14]])
; ORIGIN-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 64
; ORIGIN-NEXT: [[TMP29:%.*]] = add i64 [[TMP28]], 3
; ORIGIN-NEXT: [[TMP30:%.*]] = udiv i64 [[TMP29]], 4
; ORIGIN-NEXT: br label [[DOTSPLIT:%.*]]
; ORIGIN: .split:
; ORIGIN-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP25]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT: [[TMP31:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[IV]]
; ORIGIN-NEXT: store i32 [[TMP26]], ptr [[TMP31]], align 4
; ORIGIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP30]]
; ORIGIN-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN: .split.split:
; ORIGIN-NEXT: br label [[TMP32]]
; ORIGIN: 32:
; ORIGIN-NEXT: store <vscale x 8 x i64> [[TMP8]], ptr [[B]], align 64
; ORIGIN-NEXT: ret void
;
  %1 = load <vscale x 8 x i64>, ptr %a
  %2 = load <vscale x 8 x i64>, ptr %b
  %3 = add <vscale x 8 x i64> %1, %2
  store <vscale x 8 x i64> %2, ptr %b
  ret void
}

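; Floating-point scalable vectors use an integer shadow vector with the same
; element count: a <vscale x 4 x i32> shadow for <vscale x 4 x float> data.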
define void @test_load_store_float(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_float(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 16
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP7]], align 16
; CHECK-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[B]], align 16
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_load_store_float(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP1:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 16
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; ORIGIN-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGIN-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
; ORIGIN-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT: [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
; ORIGIN-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; ORIGIN-NEXT: store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP10]], align 16
; ORIGIN-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[_MSLD]])
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP13]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP21:%.*]], !prof [[PROF1]]
; ORIGIN: 14:
; ORIGIN-NEXT: [[TMP15:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP7]])
; ORIGIN-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 16
; ORIGIN-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 3
; ORIGIN-NEXT: [[TMP19:%.*]] = udiv i64 [[TMP18]], 4
; ORIGIN-NEXT: br label [[DOTSPLIT:%.*]]
; ORIGIN: .split:
; ORIGIN-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP14]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[IV]]
; ORIGIN-NEXT: store i32 [[TMP15]], ptr [[TMP20]], align 4
; ORIGIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP19]]
; ORIGIN-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN: .split.split:
; ORIGIN-NEXT: br label [[TMP21]]
; ORIGIN: 21:
; ORIGIN-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[B]], align 16
; ORIGIN-NEXT: ret void
;
  %1 = load <vscale x 4 x float>, ptr %a
  store <vscale x 4 x float> %1, ptr %b
  ret void
}

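; Same as test_load_store_add_int, but with fadd on <vscale x 2 x float>;
; again the fadd result is unused and the loaded value %2 is stored back.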
define void @test_load_store_add_float(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_add_float(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 2 x float>, ptr [[B]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load <vscale x 2 x i32>, ptr [[TMP8]], align 8
; CHECK-NEXT: [[_MSPROP:%.*]] = or <vscale x 2 x i32> [[_MSLD]], [[_MSLD1]]
; CHECK-NEXT: [[TMP9:%.*]] = fadd <vscale x 2 x float> [[TMP1]], [[TMP5]]
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 87960930222080
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: store <vscale x 2 x i32> [[_MSLD1]], ptr [[TMP12]], align 8
; CHECK-NEXT: store <vscale x 2 x float> [[TMP5]], ptr [[B]], align 8
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_load_store_add_float(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; ORIGIN-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGIN-NEXT: [[TMP8:%.*]] = load <vscale x 2 x float>, ptr [[B]], align 8
; ORIGIN-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; ORIGIN-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; ORIGIN-NEXT: [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416
; ORIGIN-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
; ORIGIN-NEXT: [[_MSLD1:%.*]] = load <vscale x 2 x i32>, ptr [[TMP11]], align 8
; ORIGIN-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 8
; ORIGIN-NEXT: [[_MSPROP:%.*]] = or <vscale x 2 x i32> [[_MSLD]], [[_MSLD1]]
; ORIGIN-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD1]])
; ORIGIN-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; ORIGIN-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP14]], i32 [[TMP7]]
; ORIGIN-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x float> [[TMP1]], [[TMP8]]
; ORIGIN-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
; ORIGIN-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
; ORIGIN-NEXT: [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
; ORIGIN-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; ORIGIN-NEXT: store <vscale x 2 x i32> [[_MSLD1]], ptr [[TMP21]], align 8
; ORIGIN-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD1]])
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP24]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP25:%.*]], label [[TMP32:%.*]], !prof [[PROF1]]
; ORIGIN: 25:
; ORIGIN-NEXT: [[TMP26:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP14]])
; ORIGIN-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP28:%.*]] = mul nuw i64 [[TMP27]], 8
; ORIGIN-NEXT: [[TMP29:%.*]] = add i64 [[TMP28]], 3
; ORIGIN-NEXT: [[TMP30:%.*]] = udiv i64 [[TMP29]], 4
; ORIGIN-NEXT: br label [[DOTSPLIT:%.*]]
; ORIGIN: .split:
; ORIGIN-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP25]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT: [[TMP31:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[IV]]
; ORIGIN-NEXT: store i32 [[TMP26]], ptr [[TMP31]], align 4
; ORIGIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP30]]
; ORIGIN-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN: .split.split:
; ORIGIN-NEXT: br label [[TMP32]]
; ORIGIN: 32:
; ORIGIN-NEXT: store <vscale x 2 x float> [[TMP8]], ptr [[B]], align 8
; ORIGIN-NEXT: ret void
;
  %1 = load <vscale x 2 x float>, ptr %a
  %2 = load <vscale x 2 x float>, ptr %b
  %3 = fadd <vscale x 2 x float> %1, %2
  store <vscale x 2 x float> %2, ptr %b
  ret void
}

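; Returning a scalable vector: the return shadow (and, with origin tracking,
; the return origin) is passed back through the __msan_retval_tls and
; __msan_retval_origin_tls thread-local slots.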
define <vscale x 2 x float> @fn_ret(ptr %a) sanitize_memory {
; CHECK-LABEL: define <vscale x 2 x float> @fn_ret(
; CHECK-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT: store <vscale x 2 x i32> [[_MSLD]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <vscale x 2 x float> [[TMP1]]
;
; ORIGIN-LABEL: define <vscale x 2 x float> @fn_ret(
; ORIGIN-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; ORIGIN-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGIN-NEXT: store <vscale x 2 x i32> [[_MSLD]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4
; ORIGIN-NEXT: ret <vscale x 2 x float> [[TMP1]]
;
  %1 = load <vscale x 2 x float>, ptr %a
  ret <vscale x 2 x float> %1
}

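; The caller reloads the return shadow from __msan_retval_tls and stores it
; to the shadow of the destination, mirroring the store of the return value.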
define void @test_ret(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_ret(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x float> @fn_ret(ptr [[A]])
; CHECK-NEXT: [[_MSRET:%.*]] = load <vscale x 2 x i32>, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: store <vscale x 2 x i32> [[_MSRET]], ptr [[TMP4]], align 8
; CHECK-NEXT: store <vscale x 2 x float> [[TMP5]], ptr [[B]], align 8
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_ret(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: [[TMP3:%.*]] = call <vscale x 2 x float> @fn_ret(ptr [[A]])
; ORIGIN-NEXT: [[_MSRET:%.*]] = load <vscale x 2 x i32>, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
; ORIGIN-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ORIGIN-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGIN-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416
; ORIGIN-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; ORIGIN-NEXT: store <vscale x 2 x i32> [[_MSRET]], ptr [[TMP7]], align 8
; ORIGIN-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSRET]])
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP10]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP18:%.*]], !prof [[PROF1]]
; ORIGIN: 11:
; ORIGIN-NEXT: [[TMP12:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP4]])
; ORIGIN-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 8
; ORIGIN-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], 3
; ORIGIN-NEXT: [[TMP16:%.*]] = udiv i64 [[TMP15]], 4
; ORIGIN-NEXT: br label [[DOTSPLIT:%.*]]
; ORIGIN: .split:
; ORIGIN-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP11]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[TMP9]], i64 [[IV]]
; ORIGIN-NEXT: store i32 [[TMP12]], ptr [[TMP17]], align 4
; ORIGIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP16]]
; ORIGIN-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN: .split.split:
; ORIGIN-NEXT: br label [[TMP18]]
; ORIGIN: 18:
; ORIGIN-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[B]], align 8
; ORIGIN-NEXT: ret void
;
  %1 = call <vscale x 2 x float> @fn_ret(ptr %a)
  store <vscale x 2 x float> %1, ptr %b
  ret void
}

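; Scalable-vector arguments appear not to be forwarded through the parameter
; TLS slots; inside the callee the argument's shadow is treated as clean
; (zeroinitializer), so the origin-store branch below is trivially dead.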
define void @fn_param(<vscale x 2 x float> %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @fn_param(
; CHECK-SAME: <vscale x 2 x float> [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; CHECK-NEXT: store <vscale x 2 x float> [[A]], ptr [[B]], align 8
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @fn_param(
; ORIGIN-SAME: <vscale x 2 x float> [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGIN-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGIN-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGIN-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGIN-NEXT: store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; ORIGIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> zeroinitializer)
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP14:%.*]], !prof [[PROF1]]
; ORIGIN: 7:
; ORIGIN-NEXT: [[TMP8:%.*]] = call i32 @__msan_chain_origin(i32 0)
; ORIGIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
; ORIGIN-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 3
; ORIGIN-NEXT: [[TMP12:%.*]] = udiv i64 [[TMP11]], 4
; ORIGIN-NEXT: br label [[DOTSPLIT:%.*]]
; ORIGIN: .split:
; ORIGIN-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGIN-NEXT: store i32 [[TMP8]], ptr [[TMP13]], align 4
; ORIGIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP12]]
; ORIGIN-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN: .split.split:
; ORIGIN-NEXT: br label [[TMP14]]
; ORIGIN: 14:
; ORIGIN-NEXT: store <vscale x 2 x float> [[A]], ptr [[B]], align 8
; ORIGIN-NEXT: ret void
;
  store <vscale x 2 x float> %a, ptr %b
  ret void
}

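; Since the argument shadow cannot be forwarded (see fn_param above), the
; caller instead checks the scalable-vector argument before the call and
; reports with __msan_warning if any lane is poisoned.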
define void @test_param(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_param(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP5]], align 8
; CHECK-NEXT: store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD]])
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5:[0-9]+]]
; CHECK-NEXT: unreachable
; CHECK: 8:
; CHECK-NEXT: call void @fn_param(<vscale x 2 x float> [[TMP2]], ptr [[B]])
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_param(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[TMP3:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416
; ORIGIN-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; ORIGIN-NEXT: [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP6]], align 8
; ORIGIN-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 8
; ORIGIN-NEXT: store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD]])
; ORIGIN-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP10]], 0
; ORIGIN-NEXT: br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
; ORIGIN: 11:
; ORIGIN-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP9]]) #[[ATTR5:[0-9]+]]
; ORIGIN-NEXT: unreachable
; ORIGIN: 12:
; ORIGIN-NEXT: call void @fn_param(<vscale x 2 x float> [[TMP3]], ptr [[B]])
; ORIGIN-NEXT: ret void
;
  %1 = load <vscale x 2 x float>, ptr %a
  call void @fn_param(<vscale x 2 x float> %1, ptr %b)
  ret void
}

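; Allocas of scalable types are poisoned with a memset of 0xff whose size is
; computed at runtime from llvm.vscale: <vscale x 64 x i1> occupies
; vscale * 8 bytes.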
define void @test_alloca1() sanitize_memory {
; CHECK-LABEL: define void @test_alloca1(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X:%.*]] = alloca <vscale x 64 x i1>, align 4
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_alloca1(
; ORIGIN-SAME: ) #[[ATTR0]] {
; ORIGIN-NEXT: entry:
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[X:%.*]] = alloca <vscale x 64 x i1>, align 4
; ORIGIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -4
; ORIGIN-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; ORIGIN-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X]], i64 [[TMP1]], ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]])
; ORIGIN-NEXT: ret void
;
entry:
  %x = alloca <vscale x 64 x i1>, align 4
  ret void
}

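; As above, for <vscale x 64 x double>: vscale * 512 bytes (64 * 8 bytes).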
define void @test_alloca2() sanitize_memory {
; CHECK-LABEL: define void @test_alloca2(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X:%.*]] = alloca <vscale x 64 x double>, align 4
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 512
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_alloca2(
; ORIGIN-SAME: ) #[[ATTR0]] {
; ORIGIN-NEXT: entry:
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[X:%.*]] = alloca <vscale x 64 x double>, align 4
; ORIGIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 512
; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -4
; ORIGIN-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; ORIGIN-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X]], i64 [[TMP1]], ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]])
; ORIGIN-NEXT: ret void
;
entry:
  %x = alloca <vscale x 64 x double>, align 4
  ret void
}

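; A <vscale x 1 x i1> alloca rounds up to vscale * 1 byte; no multiply is
; needed since the scaled byte size equals the vscale count itself.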
define void @test_alloca3() sanitize_memory {
; CHECK-LABEL: define void @test_alloca3(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[X:%.*]] = alloca <vscale x 1 x i1>, align 4
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 [[TMP0]], i1 false)
; CHECK-NEXT: ret void
;
; ORIGIN-LABEL: define void @test_alloca3(
; ORIGIN-SAME: ) #[[ATTR0]] {
; ORIGIN-NEXT: entry:
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[X:%.*]] = alloca <vscale x 1 x i1>, align 4
; ORIGIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
; ORIGIN-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGIN-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGIN-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGIN-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; ORIGIN-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 [[TMP0]], i1 false)
; ORIGIN-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X]], i64 [[TMP0]], ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]])
; ORIGIN-NEXT: ret void
;
entry:
  %x = alloca <vscale x 1 x i1>, align 4
  ret void
}

;.
; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
;.
; ORIGIN: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
;.