| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s |
| |
| declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b) |
| declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b) |
| declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b) |
| declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b) |
| declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b) |
| declare {i8, i1} @llvm.smul.with.overflow.i8(i8 %a, i8 %b) |
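
; Constant folding: 254 + 3 = 257 does not fit in i8, so the unsigned overflow bit is true.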
| |
| define i1 @test_uadd1() { |
| ; CHECK-LABEL: @test_uadd1( |
| ; CHECK-NEXT: ret i1 true |
| ; |
| %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 3) |
| %overflow = extractvalue {i8, i1} %x, 1 |
| ret i1 %overflow |
| } |
| |
| define i8 @test_uadd2() { |
| ; CHECK-LABEL: @test_uadd2( |
| ; CHECK-NEXT: ret i8 42 |
| ; |
| %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 44) |
| %result = extractvalue {i8, i1} %x, 0 |
| ret i8 %result |
| } |
| |
| define {i8, i1} @test_uadd3(i8 %v) { |
| ; CHECK-LABEL: @test_uadd3( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 undef) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_uadd3_poison(i8 %v) { |
| ; CHECK-LABEL: @test_uadd3_poison( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 poison) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_uadd4(i8 %v) { |
| ; CHECK-LABEL: @test_uadd4( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_uadd4_poison(i8 %v) { |
| ; CHECK-LABEL: @test_uadd4_poison( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 poison, i8 %v) |
| ret {i8, i1} %result |
| } |
| |
| define i1 @test_sadd1() { |
| ; CHECK-LABEL: @test_sadd1( |
| ; CHECK-NEXT: ret i1 true |
| ; |
| %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 3) |
| %overflow = extractvalue {i8, i1} %x, 1 |
| ret i1 %overflow |
| } |
| |
| define i8 @test_sadd2() { |
| ; CHECK-LABEL: @test_sadd2( |
| ; CHECK-NEXT: ret i8 -86 |
| ; |
| %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 44) |
| %result = extractvalue {i8, i1} %x, 0 |
| ret i8 %result |
| } |
| |
| define {i8, i1} @test_sadd3(i8 %v) { |
| ; CHECK-LABEL: @test_sadd3( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 undef) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_sadd3_poison(i8 %v) { |
| ; CHECK-LABEL: @test_sadd3_poison( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 poison) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_sadd4(i8 %v) { |
| ; CHECK-LABEL: @test_sadd4( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_sadd4_poison(i8 %v) { |
| ; CHECK-LABEL: @test_sadd4_poison( |
| ; CHECK-NEXT: ret { i8, i1 } { i8 -1, i1 false } |
| ; |
| %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 poison, i8 %v) |
| ret {i8, i1} %result |
| } |
| |
| define {i8, i1} @test_usub1(i8 %V) { |
| ; CHECK-LABEL: @test_usub1( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_usub2(i8 %V) { |
| ; CHECK-LABEL: @test_usub2( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 undef) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_usub2_poison(i8 %V) { |
| ; CHECK-LABEL: @test_usub2_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 poison) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_usub3(i8 %V) { |
| ; CHECK-LABEL: @test_usub3( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 undef, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_usub3_poison(i8 %V) { |
| ; CHECK-LABEL: @test_usub3_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 poison, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_ssub1(i8 %V) { |
| ; CHECK-LABEL: @test_ssub1( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_ssub2(i8 %V) { |
| ; CHECK-LABEL: @test_ssub2( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 undef) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_ssub2_poison(i8 %V) { |
| ; CHECK-LABEL: @test_ssub2_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 poison) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_ssub3(i8 %V) { |
| ; CHECK-LABEL: @test_ssub3( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_ssub3_poison(i8 %V) { |
| ; CHECK-LABEL: @test_ssub3_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 poison, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_umul1(i8 %V) { |
| ; CHECK-LABEL: @test_umul1( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 0) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_umul2(i8 %V) { |
| ; CHECK-LABEL: @test_umul2( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 undef) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_umul2_poison(i8 %V) { |
| ; CHECK-LABEL: @test_umul2_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 poison) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_umul3(i8 %V) { |
| ; CHECK-LABEL: @test_umul3( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_umul4(i8 %V) { |
| ; CHECK-LABEL: @test_umul4( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_umul4_poison(i8 %V) { |
| ; CHECK-LABEL: @test_umul4_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 poison, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_smul1(i8 %V) { |
| ; CHECK-LABEL: @test_smul1( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 0) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_smul2(i8 %V) { |
| ; CHECK-LABEL: @test_smul2( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 undef) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_smul2_poison(i8 %V) { |
| ; CHECK-LABEL: @test_smul2_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 poison) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_smul3(i8 %V) { |
| ; CHECK-LABEL: @test_smul3( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_smul4(i8 %V) { |
| ; CHECK-LABEL: @test_smul4( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| define {i8, i1} @test_smul4_poison(i8 %V) { |
| ; CHECK-LABEL: @test_smul4_poison( |
| ; CHECK-NEXT: ret { i8, i1 } zeroinitializer |
| ; |
| %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 poison, i8 %V) |
| ret {i8, i1} %x |
| } |
| |
| ; Test a non-intrinsic that we know about as a library call. |
| declare float @fabsf(float %x) |
| |
| define float @test_fabs_libcall() { |
| ; CHECK-LABEL: @test_fabs_libcall( |
| ; CHECK-NEXT: [[X:%.*]] = call float @fabsf(float -4.200000e+01) |
| ; CHECK-NEXT: ret float 4.200000e+01 |
| ; |
| |
| %x = call float @fabsf(float -42.0) |
| ; This is still a real function call, so instsimplify won't nuke it -- other |
| ; passes have to do that. |
| |
| ret float %x |
| } |
| |
| |
| declare float @llvm.fabs.f32(float) nounwind readnone |
| declare float @llvm.floor.f32(float) nounwind readnone |
| declare float @llvm.ceil.f32(float) nounwind readnone |
| declare float @llvm.trunc.f32(float) nounwind readnone |
| declare float @llvm.rint.f32(float) nounwind readnone |
| declare float @llvm.nearbyint.f32(float) nounwind readnone |
| declare float @llvm.canonicalize.f32(float) nounwind readnone |
| declare float @llvm.arithmetic.fence.f32(float) nounwind readnone |
| |
; Test idempotent intrinsics: f(f(x)) simplifies to f(x), so the second call in each pair is removed.
| define float @test_idempotence(float %a) { |
| ; CHECK-LABEL: @test_idempotence( |
| ; CHECK-NEXT: [[A0:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]]) |
| ; CHECK-NEXT: [[B0:%.*]] = call float @llvm.floor.f32(float [[A]]) |
| ; CHECK-NEXT: [[C0:%.*]] = call float @llvm.ceil.f32(float [[A]]) |
| ; CHECK-NEXT: [[D0:%.*]] = call float @llvm.trunc.f32(float [[A]]) |
| ; CHECK-NEXT: [[E0:%.*]] = call float @llvm.rint.f32(float [[A]]) |
| ; CHECK-NEXT: [[F0:%.*]] = call float @llvm.nearbyint.f32(float [[A]]) |
| ; CHECK-NEXT: [[G0:%.*]] = call float @llvm.canonicalize.f32(float [[A]]) |
| ; CHECK-NEXT: [[H0:%.*]] = call float @llvm.arithmetic.fence.f32(float [[A]]) |
| ; CHECK-NEXT: [[R0:%.*]] = fadd float [[A0]], [[B0]] |
| ; CHECK-NEXT: [[R1:%.*]] = fadd float [[R0]], [[C0]] |
| ; CHECK-NEXT: [[R2:%.*]] = fadd float [[R1]], [[D0]] |
| ; CHECK-NEXT: [[R3:%.*]] = fadd float [[R2]], [[E0]] |
| ; CHECK-NEXT: [[R4:%.*]] = fadd float [[R3]], [[F0]] |
| ; CHECK-NEXT: [[R5:%.*]] = fadd float [[R4]], [[G0]] |
| ; CHECK-NEXT: [[R6:%.*]] = fadd float [[R5]], [[H0]] |
| ; CHECK-NEXT: ret float [[R6]] |
| ; |
| |
| %a0 = call float @llvm.fabs.f32(float %a) |
| %a1 = call float @llvm.fabs.f32(float %a0) |
| |
| %b0 = call float @llvm.floor.f32(float %a) |
| %b1 = call float @llvm.floor.f32(float %b0) |
| |
| %c0 = call float @llvm.ceil.f32(float %a) |
| %c1 = call float @llvm.ceil.f32(float %c0) |
| |
| %d0 = call float @llvm.trunc.f32(float %a) |
| %d1 = call float @llvm.trunc.f32(float %d0) |
| |
| %e0 = call float @llvm.rint.f32(float %a) |
| %e1 = call float @llvm.rint.f32(float %e0) |
| |
| %f0 = call float @llvm.nearbyint.f32(float %a) |
| %f1 = call float @llvm.nearbyint.f32(float %f0) |
| |
| %g0 = call float @llvm.canonicalize.f32(float %a) |
| %g1 = call float @llvm.canonicalize.f32(float %g0) |
| |
| %h0 = call float @llvm.arithmetic.fence.f32(float %a) |
| %h1 = call float @llvm.arithmetic.fence.f32(float %h0) |
| |
| %r0 = fadd float %a1, %b1 |
| %r1 = fadd float %r0, %c1 |
| %r2 = fadd float %r1, %d1 |
| %r3 = fadd float %r2, %e1 |
| %r4 = fadd float %r3, %f1 |
| %r5 = fadd float %r4, %g1 |
| %r6 = fadd float %r5, %h1 |
| |
| ret float %r6 |
| } |
| |
| define ptr @operator_new() { |
| ; CHECK-LABEL: @operator_new( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias ptr @_Znwm(i64 8) |
| ; CHECK-NEXT: br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]] |
| ; CHECK: cast.notnull: |
| ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4 |
| ; CHECK-NEXT: br label [[CAST_END]] |
| ; CHECK: cast.end: |
| ; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: ret ptr [[CAST_RESULT]] |
| ; |
| entry: |
| %call = tail call noalias ptr @_Znwm(i64 8) |
| %cmp = icmp eq ptr %call, null |
| br i1 %cmp, label %cast.end, label %cast.notnull |
| |
| cast.notnull: ; preds = %entry |
| %add.ptr = getelementptr inbounds i8, ptr %call, i64 4 |
| br label %cast.end |
| |
| cast.end: ; preds = %cast.notnull, %entry |
| %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ] |
| ret ptr %cast.result |
| |
| } |
| |
| declare nonnull noalias ptr @_Znwm(i64) |
| |
| %"struct.std::nothrow_t" = type { i8 } |
| @_ZSt7nothrow = external global %"struct.std::nothrow_t" |
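
; The nothrow form of operator new may return null, so the null check cannot be removed.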
| |
| define ptr @operator_new_nothrow_t() { |
| ; CHECK-LABEL: @operator_new_nothrow_t( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALL]], null |
| ; CHECK-NEXT: br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]] |
| ; CHECK: cast.notnull: |
| ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4 |
| ; CHECK-NEXT: br label [[CAST_END]] |
| ; CHECK: cast.end: |
| ; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: ret ptr [[CAST_RESULT]] |
| ; |
| entry: |
| %call = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow) |
| %cmp = icmp eq ptr %call, null |
| br i1 %cmp, label %cast.end, label %cast.notnull |
| |
| cast.notnull: ; preds = %entry |
| %add.ptr = getelementptr inbounds i8, ptr %call, i64 4 |
| br label %cast.end |
| |
| cast.end: ; preds = %cast.notnull, %entry |
| %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ] |
| ret ptr %cast.result |
| |
| } |
| |
| declare ptr @_ZnamRKSt9nothrow_t(i64, ptr) nounwind |
| |
| define ptr @malloc_can_return_null() { |
| ; CHECK-LABEL: @malloc_can_return_null( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias ptr @malloc(i64 8) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[CALL]], null |
| ; CHECK-NEXT: br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]] |
| ; CHECK: cast.notnull: |
| ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4 |
| ; CHECK-NEXT: br label [[CAST_END]] |
| ; CHECK: cast.end: |
| ; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ] |
| ; CHECK-NEXT: ret ptr [[CAST_RESULT]] |
| ; |
| entry: |
| %call = tail call noalias ptr @malloc(i64 8) |
| %cmp = icmp eq ptr %call, null |
| br i1 %cmp, label %cast.end, label %cast.notnull |
| |
| cast.notnull: ; preds = %entry |
| %add.ptr = getelementptr inbounds i8, ptr %call, i64 4 |
| br label %cast.end |
| |
| cast.end: ; preds = %cast.notnull, %entry |
| %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ] |
| ret ptr %cast.result |
| |
| } |
| |
| define i32 @call_null() { |
| ; CHECK-LABEL: @call_null( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALL:%.*]] = call i32 null() |
| ; CHECK-NEXT: ret i32 poison |
| ; |
| entry: |
| %call = call i32 null() |
| ret i32 %call |
| } |
| |
| define i32 @call_undef() { |
| ; CHECK-LABEL: @call_undef( |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[CALL:%.*]] = call i32 undef() |
| ; CHECK-NEXT: ret i32 poison |
| ; |
| entry: |
| %call = call i32 undef() |
| ret i32 %call |
| } |
| |
| @GV = private constant [8 x i32] [i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49] |
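
; The load starts two elements before @GV, but those lanes are masked off (and the passthrough is undef),
; so the result is two undef lanes followed by the first six elements of @GV.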
| |
| define <8 x i32> @partial_masked_load() { |
| ; CHECK-LABEL: @partial_masked_load( |
| ; CHECK-NEXT: ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47> |
| ; |
| %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr getelementptr ([8 x i32], ptr @GV, i64 0, i64 -2), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef) |
| ret <8 x i32> %masked.load |
| } |
| |
| define <8 x i32> @masked_load_undef_mask(ptr %V) { |
| ; CHECK-LABEL: @masked_load_undef_mask( |
| ; CHECK-NEXT: ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0> |
| ; |
| %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>) |
| ret <8 x i32> %masked.load |
| } |
| |
| declare noalias ptr @malloc(i64) |
| |
| declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>) |
| |
| declare double @llvm.powi.f64.i16(double, i16) |
| declare <2 x double> @llvm.powi.v2f64.i16(<2 x double>, i16) |
| declare double @llvm.powi.f64.i32(double, i32) |
| declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32) |
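
; powi constant folds: 3^2 = 9 and 5^2 = 25.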
| |
| define double @constant_fold_powi() { |
| ; CHECK-LABEL: @constant_fold_powi( |
| ; CHECK-NEXT: ret double 9.000000e+00 |
| ; |
| %t0 = call double @llvm.powi.f64.i32(double 3.00000e+00, i32 2) |
| ret double %t0 |
| } |
| |
| define double @constant_fold_powi_i16() { |
| ; CHECK-LABEL: @constant_fold_powi_i16( |
| ; CHECK-NEXT: ret double 9.000000e+00 |
| ; |
| %t0 = call double @llvm.powi.f64.i16(double 3.00000e+00, i16 2) |
| ret double %t0 |
| } |
| |
| define <2 x double> @constant_fold_powi_vec() { |
| ; CHECK-LABEL: @constant_fold_powi_vec( |
| ; CHECK-NEXT: ret <2 x double> <double 9.000000e+00, double 2.500000e+01> |
| ; |
| %t0 = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i32 2) |
| ret <2 x double> %t0 |
| } |
| |
| define <2 x double> @constant_fold_powi_vec_i16() { |
| ; CHECK-LABEL: @constant_fold_powi_vec_i16( |
| ; CHECK-NEXT: ret <2 x double> <double 9.000000e+00, double 2.500000e+01> |
| ; |
| %t0 = call <2 x double> @llvm.powi.v2f64.i16(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i16 2) |
| ret <2 x double> %t0 |
| } |
| |
| declare i8 @llvm.fshl.i8(i8, i8, i8) |
| declare i9 @llvm.fshr.i9(i9, i9, i9) |
| declare <2 x i7> @llvm.fshl.v2i7(<2 x i7>, <2 x i7>, <2 x i7>) |
| declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) |
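
; A funnel shift by zero is a no-op: fshl returns its first operand and fshr returns its second.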
| |
| define i8 @fshl_no_shift(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @fshl_no_shift( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 0) |
| ret i8 %z |
| } |
| |
| define i9 @fshr_no_shift(i9 %x, i9 %y) { |
| ; CHECK-LABEL: @fshr_no_shift( |
| ; CHECK-NEXT: ret i9 [[Y:%.*]] |
| ; |
| %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 0) |
| ret i9 %z |
| } |
| |
| define i8 @fshl_no_shift_modulo_bitwidth(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 40) |
| ret i8 %z |
| } |
| |
| define i9 @fshr_no_shift_modulo_bitwidth(i9 %x, i9 %y) { |
| ; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth( |
| ; CHECK-NEXT: ret i9 [[Y:%.*]] |
| ; |
| %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 189) |
| ret i9 %z |
| } |
| |
| define <2 x i7> @fshl_no_shift_modulo_bitwidth_splat(<2 x i7> %x, <2 x i7> %y) { |
| ; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth_splat( |
| ; CHECK-NEXT: ret <2 x i7> [[X:%.*]] |
| ; |
| %z = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> %x, <2 x i7> %y, <2 x i7> <i7 21, i7 21>) |
| ret <2 x i7> %z |
| } |
| |
| define <2 x i8> @fshr_no_shift_modulo_bitwidth_splat(<2 x i8> %x, <2 x i8> %y) { |
| ; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth_splat( |
| ; CHECK-NEXT: ret <2 x i8> [[Y:%.*]] |
| ; |
| %z = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> <i8 72, i8 72>) |
| ret <2 x i8> %z |
| } |
| |
| ; If y is poison, eliminating the guard is not safe. |
| |
| define i8 @fshl_zero_shift_guard(i8 %x, i8 %y, i8 %sh) { |
| ; CHECK-LABEL: @fshl_zero_shift_guard( |
| ; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0 |
| ; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]]) |
| ; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[F]] |
| ; CHECK-NEXT: ret i8 [[S]] |
| ; |
| %c = icmp eq i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh) |
| %s = select i1 %c, i8 %x, i8 %f |
| ret i8 %s |
| } |
| |
| ; If y is poison, eliminating the guard is not safe. |
| |
| define i8 @fshl_zero_shift_guard_swapped(i8 %x, i8 %y, i8 %sh) { |
| ; CHECK-LABEL: @fshl_zero_shift_guard_swapped( |
| ; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[SH:%.*]], 0 |
| ; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]]) |
| ; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[F]], i8 [[X]] |
| ; CHECK-NEXT: ret i8 [[S]] |
| ; |
| %c = icmp ne i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh) |
| %s = select i1 %c, i8 %f, i8 %x |
| ret i8 %s |
| } |
| |
| ; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted. |
| |
| define i8 @fshl_zero_shift_guard_inverted(i8 %x, i8 %y, i8 %sh) { |
| ; CHECK-LABEL: @fshl_zero_shift_guard_inverted( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %c = icmp eq i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh) |
| %s = select i1 %c, i8 %f, i8 %x |
| ret i8 %s |
| } |
| |
| ; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted. |
| |
| define i8 @fshl_zero_shift_guard_inverted_swapped(i8 %x, i8 %y, i8 %sh) { |
| ; CHECK-LABEL: @fshl_zero_shift_guard_inverted_swapped( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %c = icmp ne i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh) |
| %s = select i1 %c, i8 %x, i8 %f |
| ret i8 %s |
| } |
| |
| ; If x is poison, eliminating the guard is not safe. |
| |
| define i9 @fshr_zero_shift_guard(i9 %x, i9 %y, i9 %sh) { |
| ; CHECK-LABEL: @fshr_zero_shift_guard( |
| ; CHECK-NEXT: [[C:%.*]] = icmp eq i9 [[SH:%.*]], 0 |
| ; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]]) |
| ; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i9 [[Y]], i9 [[F]] |
| ; CHECK-NEXT: ret i9 [[S]] |
| ; |
| %c = icmp eq i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh) |
| %s = select i1 %c, i9 %y, i9 %f |
| ret i9 %s |
| } |
| |
| ; If x is poison, eliminating the guard is not safe. |
| |
| define i9 @fshr_zero_shift_guard_swapped(i9 %x, i9 %y, i9 %sh) { |
| ; CHECK-LABEL: @fshr_zero_shift_guard_swapped( |
| ; CHECK-NEXT: [[C:%.*]] = icmp ne i9 [[SH:%.*]], 0 |
| ; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]]) |
| ; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i9 [[F]], i9 [[Y]] |
| ; CHECK-NEXT: ret i9 [[S]] |
| ; |
| %c = icmp ne i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh) |
| %s = select i1 %c, i9 %f, i9 %y |
| ret i9 %s |
| } |
| |
| ; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted. |
| |
| define i9 @fshr_zero_shift_guard_inverted(i9 %x, i9 %y, i9 %sh) { |
| ; CHECK-LABEL: @fshr_zero_shift_guard_inverted( |
| ; CHECK-NEXT: ret i9 [[Y:%.*]] |
| ; |
| %c = icmp eq i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh) |
| %s = select i1 %c, i9 %f, i9 %y |
| ret i9 %s |
| } |
| |
| ; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted. |
| |
| define i9 @fshr_zero_shift_guard_inverted_swapped(i9 %x, i9 %y, i9 %sh) { |
| ; CHECK-LABEL: @fshr_zero_shift_guard_inverted_swapped( |
| ; CHECK-NEXT: ret i9 [[Y:%.*]] |
| ; |
| %c = icmp ne i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh) |
| %s = select i1 %c, i9 %y, i9 %f |
| ret i9 %s |
| } |
| |
| ; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed. |
| |
| define i8 @rotl_zero_shift_guard(i8 %x, i8 %sh) { |
| ; CHECK-LABEL: @rotl_zero_shift_guard( |
| ; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]]) |
| ; CHECK-NEXT: ret i8 [[F]] |
| ; |
| %c = icmp eq i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh) |
| %s = select i1 %c, i8 %x, i8 %f |
| ret i8 %s |
| } |
| |
| ; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed. |
| |
| define i8 @rotl_zero_shift_guard_swapped(i8 %x, i8 %sh) { |
| ; CHECK-LABEL: @rotl_zero_shift_guard_swapped( |
| ; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]]) |
| ; CHECK-NEXT: ret i8 [[F]] |
| ; |
| %c = icmp ne i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh) |
| %s = select i1 %c, i8 %f, i8 %x |
| ret i8 %s |
| } |
| |
| ; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted. |
| |
| define i8 @rotl_zero_shift_guard_inverted(i8 %x, i8 %sh) { |
| ; CHECK-LABEL: @rotl_zero_shift_guard_inverted( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %c = icmp eq i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh) |
| %s = select i1 %c, i8 %f, i8 %x |
| ret i8 %s |
| } |
| |
| ; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted. |
| |
| define i8 @rotl_zero_shift_guard_inverted_swapped(i8 %x, i8 %sh) { |
| ; CHECK-LABEL: @rotl_zero_shift_guard_inverted_swapped( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %c = icmp ne i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh) |
| %s = select i1 %c, i8 %x, i8 %f |
| ret i8 %s |
| } |
| |
| ; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed. |
| |
| define i9 @rotr_zero_shift_guard(i9 %x, i9 %sh) { |
| ; CHECK-LABEL: @rotr_zero_shift_guard( |
| ; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]]) |
| ; CHECK-NEXT: ret i9 [[F]] |
| ; |
| %c = icmp eq i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh) |
| %s = select i1 %c, i9 %x, i9 %f |
| ret i9 %s |
| } |
| |
| ; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed. |
| |
| define i9 @rotr_zero_shift_guard_swapped(i9 %x, i9 %sh) { |
| ; CHECK-LABEL: @rotr_zero_shift_guard_swapped( |
| ; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]]) |
| ; CHECK-NEXT: ret i9 [[F]] |
| ; |
| %c = icmp ne i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh) |
| %s = select i1 %c, i9 %f, i9 %x |
| ret i9 %s |
| } |
| |
| ; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted. |
| |
| define i9 @rotr_zero_shift_guard_inverted(i9 %x, i9 %sh) { |
| ; CHECK-LABEL: @rotr_zero_shift_guard_inverted( |
| ; CHECK-NEXT: ret i9 [[X:%.*]] |
| ; |
| %c = icmp eq i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh) |
| %s = select i1 %c, i9 %f, i9 %x |
| ret i9 %s |
| } |
| |
| ; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted. |
| |
| define i9 @rotr_zero_shift_guard_inverted_swapped(i9 %x, i9 %sh) { |
| ; CHECK-LABEL: @rotr_zero_shift_guard_inverted_swapped( |
| ; CHECK-NEXT: ret i9 [[X:%.*]] |
| ; |
| %c = icmp ne i9 %sh, 0 |
| %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh) |
| %s = select i1 %c, i9 %x, i9 %f |
| ret i9 %s |
| } |
| |
| ; Negative test - make sure we're matching the correct parameter of fshl. |
| |
| define i8 @fshl_zero_shift_guard_wrong_select_op(i8 %x, i8 %y, i8 %sh) { |
| ; CHECK-LABEL: @fshl_zero_shift_guard_wrong_select_op( |
| ; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0 |
| ; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]]) |
| ; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[Y]], i8 [[F]] |
| ; CHECK-NEXT: ret i8 [[S]] |
| ; |
| %c = icmp eq i8 %sh, 0 |
| %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh) |
| %s = select i1 %c, i8 %y, i8 %f |
| ret i8 %s |
| } |
| |
| ; Vector types work too. |
| |
| define <2 x i8> @rotr_zero_shift_guard_splat(<2 x i8> %x, <2 x i8> %sh) { |
| ; CHECK-LABEL: @rotr_zero_shift_guard_splat( |
| ; CHECK-NEXT: [[F:%.*]] = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[X]], <2 x i8> [[SH:%.*]]) |
| ; CHECK-NEXT: ret <2 x i8> [[F]] |
| ; |
| %c = icmp eq <2 x i8> %sh, zeroinitializer |
| %f = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %x, <2 x i8> %sh) |
| %s = select <2 x i1> %c, <2 x i8> %x, <2 x i8> %f |
| ret <2 x i8> %s |
| } |
| |
; If the first two operands of a funnel shift are undef, the result is undef
| |
| define i8 @fshl_ops_undef(i8 %shamt) { |
| ; CHECK-LABEL: @fshl_ops_undef( |
| ; CHECK-NEXT: ret i8 undef |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 %shamt) |
| ret i8 %r |
| } |
| |
| define i9 @fshr_ops_undef(i9 %shamt) { |
| ; CHECK-LABEL: @fshr_ops_undef( |
| ; CHECK-NEXT: ret i9 undef |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 %shamt) |
| ret i9 %r |
| } |
| |
; If the shift amount is undef, treat it as zero: fshl returns operand 0 and fshr returns operand 1
| |
| define i8 @fshl_shift_undef(i8 %x, i8 %y) { |
| ; CHECK-LABEL: @fshl_shift_undef( |
| ; CHECK-NEXT: ret i8 [[X:%.*]] |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 undef) |
| ret i8 %r |
| } |
| |
| define i9 @fshr_shift_undef(i9 %x, i9 %y) { |
| ; CHECK-LABEL: @fshr_shift_undef( |
| ; CHECK-NEXT: ret i9 [[Y:%.*]] |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 undef) |
| ret i9 %r |
| } |
| |
; If one of the operands is poison, the result is poison
| ; TODO: these should be poison |
| define i8 @fshl_ops_poison(i8 %b, i8 %shamt) { |
| ; CHECK-LABEL: @fshl_ops_poison( |
| ; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 poison, i8 [[B:%.*]], i8 [[SHAMT:%.*]]) |
| ; CHECK-NEXT: ret i8 [[R]] |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 poison, i8 %b, i8 %shamt) |
| ret i8 %r |
| } |
| |
| define i8 @fshl_ops_poison2(i8 %shamt) { |
| ; CHECK-LABEL: @fshl_ops_poison2( |
| ; CHECK-NEXT: ret i8 undef |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 poison, i8 undef, i8 %shamt) |
| ret i8 %r |
| } |
| |
| define i8 @fshl_ops_poison3(i8 %a, i8 %shamt) { |
| ; CHECK-LABEL: @fshl_ops_poison3( |
| ; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[A:%.*]], i8 poison, i8 [[SHAMT:%.*]]) |
| ; CHECK-NEXT: ret i8 [[R]] |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 %a, i8 poison, i8 %shamt) |
| ret i8 %r |
| } |
| |
| define i8 @fshl_ops_poison4(i8 %shamt) { |
| ; CHECK-LABEL: @fshl_ops_poison4( |
| ; CHECK-NEXT: ret i8 undef |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 undef, i8 poison, i8 %shamt) |
| ret i8 %r |
| } |
| |
| define i8 @fshl_ops_poison5(i8 %a, i8 %b) { |
| ; CHECK-LABEL: @fshl_ops_poison5( |
| ; CHECK-NEXT: ret i8 [[A:%.*]] |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 %a, i8 %b, i8 poison) |
| ret i8 %r |
| } |
| |
| define i8 @fshl_ops_poison6() { |
| ; CHECK-LABEL: @fshl_ops_poison6( |
| ; CHECK-NEXT: ret i8 undef |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 poison) |
| ret i8 %r |
| } |
| |
| define i9 @fshr_ops_poison(i9 %b, i9 %shamt) { |
| ; CHECK-LABEL: @fshr_ops_poison( |
| ; CHECK-NEXT: [[R:%.*]] = call i9 @llvm.fshr.i9(i9 poison, i9 [[B:%.*]], i9 [[SHAMT:%.*]]) |
| ; CHECK-NEXT: ret i9 [[R]] |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 poison, i9 %b, i9 %shamt) |
| ret i9 %r |
| } |
| |
| define i9 @fshr_ops_poison2(i9 %shamt) { |
| ; CHECK-LABEL: @fshr_ops_poison2( |
| ; CHECK-NEXT: ret i9 undef |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 poison, i9 undef, i9 %shamt) |
| ret i9 %r |
| } |
| |
| define i9 @fshr_ops_poison3(i9 %a, i9 %shamt) { |
| ; CHECK-LABEL: @fshr_ops_poison3( |
| ; CHECK-NEXT: [[R:%.*]] = call i9 @llvm.fshr.i9(i9 [[A:%.*]], i9 poison, i9 [[SHAMT:%.*]]) |
| ; CHECK-NEXT: ret i9 [[R]] |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 %a, i9 poison, i9 %shamt) |
| ret i9 %r |
| } |
| |
| define i9 @fshr_ops_poison4(i9 %shamt) { |
| ; CHECK-LABEL: @fshr_ops_poison4( |
| ; CHECK-NEXT: ret i9 undef |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 undef, i9 poison, i9 %shamt) |
| ret i9 %r |
| } |
| |
| define i9 @fshr_ops_poison5(i9 %a, i9 %b) { |
| ; CHECK-LABEL: @fshr_ops_poison5( |
| ; CHECK-NEXT: ret i9 [[B:%.*]] |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 %a, i9 %b, i9 poison) |
| ret i9 %r |
| } |
| |
| define i9 @fshr_ops_poison6() { |
| ; CHECK-LABEL: @fshr_ops_poison6( |
| ; CHECK-NEXT: ret i9 undef |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 poison) |
| ret i9 %r |
| } |
| |
| define i8 @fshl_zero(i8 %shamt) { |
| ; CHECK-LABEL: @fshl_zero( |
| ; CHECK-NEXT: ret i8 0 |
| ; |
| %r = call i8 @llvm.fshl.i8(i8 0, i8 0, i8 %shamt) |
| ret i8 %r |
| } |
| |
| define <2 x i8> @fshr_zero_vec(<2 x i8> %shamt) { |
| ; CHECK-LABEL: @fshr_zero_vec( |
| ; CHECK-NEXT: ret <2 x i8> zeroinitializer |
| ; |
| %r = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> zeroinitializer, <2 x i8> <i8 0, i8 undef>, <2 x i8> %shamt) |
| ret <2 x i8> %r |
| } |
| |
| define <2 x i7> @fshl_ones_vec(<2 x i7> %shamt) { |
| ; CHECK-LABEL: @fshl_ones_vec( |
| ; CHECK-NEXT: ret <2 x i7> <i7 -1, i7 -1> |
| ; |
| %r = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> <i7 undef, i7 -1>, <2 x i7> <i7 -1, i7 undef>, <2 x i7> %shamt) |
| ret <2 x i7> %r |
| } |
| |
| define i9 @fshr_ones(i9 %shamt) { |
| ; CHECK-LABEL: @fshr_ones( |
| ; CHECK-NEXT: ret i9 -1 |
| ; |
| %r = call i9 @llvm.fshr.i9(i9 -1, i9 -1, i9 %shamt) |
| ret i9 %r |
| } |
| |
| declare double @llvm.fma.f64(double,double,double) |
| declare double @llvm.fmuladd.f64(double,double,double) |
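
; An undef FP operand may be chosen to be NaN, so fma/fmuladd with an undef operand folds to the
; canonical NaN; a poison operand folds the result to poison, and a constant NaN operand propagates that NaN.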
| |
| define double @fma_undef_op0(double %x, double %y) { |
| ; CHECK-LABEL: @fma_undef_op0( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fma.f64(double undef, double %x, double %y) |
| ret double %r |
| } |
| |
| define double @fma_poison_op0(double %x, double %y) { |
| ; CHECK-LABEL: @fma_poison_op0( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fma.f64(double poison, double %x, double %y) |
| ret double %r |
| } |
| |
| define double @fma_undef_op1(double %x, double %y) { |
| ; CHECK-LABEL: @fma_undef_op1( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fma.f64(double %x, double undef, double %y) |
| ret double %r |
| } |
| |
| define double @fma_poison_op1(double %x, double %y) { |
| ; CHECK-LABEL: @fma_poison_op1( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fma.f64(double %x, double poison, double %y) |
| ret double %r |
| } |
| |
| define double @fma_undef_op2(double %x, double %y) { |
| ; CHECK-LABEL: @fma_undef_op2( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fma.f64(double %x, double %y, double undef) |
| ret double %r |
| } |
| |
| define double @fma_poison_op2(double %x, double %y) { |
| ; CHECK-LABEL: @fma_poison_op2( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fma.f64(double %x, double %y, double poison) |
| ret double %r |
| } |
| |
| define double @fma_undef_op0_poison_op1(double %x) { |
| ; CHECK-LABEL: @fma_undef_op0_poison_op1( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fma.f64(double undef, double poison, double %x) |
| ret double %r |
| } |
| |
| define double @fma_undef_op0_poison_op2(double %x) { |
| ; CHECK-LABEL: @fma_undef_op0_poison_op2( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fma.f64(double undef, double %x, double poison) |
| ret double %r |
| } |
| |
| define double @fmuladd_undef_op0(double %x, double %y) { |
| ; CHECK-LABEL: @fmuladd_undef_op0( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fmuladd.f64(double undef, double %x, double %y) |
| ret double %r |
| } |
| |
| define double @fmuladd_poison_op0(double %x, double %y) { |
| ; CHECK-LABEL: @fmuladd_poison_op0( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fmuladd.f64(double poison, double %x, double %y) |
| ret double %r |
| } |
| |
| define double @fmuladd_undef_op1(double %x, double %y) { |
| ; CHECK-LABEL: @fmuladd_undef_op1( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fmuladd.f64(double %x, double undef, double %y) |
| ret double %r |
| } |
| |
| define double @fmuladd_poison_op1(double %x, double %y) { |
| ; CHECK-LABEL: @fmuladd_poison_op1( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fmuladd.f64(double %x, double poison, double %y) |
| ret double %r |
| } |
| |
| define double @fmuladd_undef_op2(double %x, double %y) { |
| ; CHECK-LABEL: @fmuladd_undef_op2( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fmuladd.f64(double %x, double %y, double undef) |
| ret double %r |
| } |
| |
| define double @fmuladd_poison_op2(double %x, double %y) { |
| ; CHECK-LABEL: @fmuladd_poison_op2( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fmuladd.f64(double %x, double %y, double poison) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_op0_poison_op1(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_op0_poison_op1( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fmuladd.f64(double 0x7ff8000000000000, double poison, double %x) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_op1_poison_op2(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_op1_poison_op2( |
| ; CHECK-NEXT: ret double poison |
| ; |
| %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff8000000000000, double poison) |
| ret double %r |
| } |
| |
| define double @fma_nan_op0(double %x, double %y) { |
| ; CHECK-LABEL: @fma_nan_op0( |
| ; CHECK-NEXT: ret double 0x7FF8000000000000 |
| ; |
| %r = call double @llvm.fma.f64(double 0x7ff8000000000000, double %x, double %y) |
| ret double %r |
| } |
| |
| define double @fma_nan_op1(double %x, double %y) { |
| ; CHECK-LABEL: @fma_nan_op1( |
| ; CHECK-NEXT: ret double 0x7FF8000000000001 |
| ; |
| %r = call double @llvm.fma.f64(double %x, double 0x7ff8000000000001, double %y) |
| ret double %r |
| } |
| |
| define double @fma_nan_op2(double %x, double %y) { |
| ; CHECK-LABEL: @fma_nan_op2( |
| ; CHECK-NEXT: ret double 0x7FF8000000000002 |
| ; |
| %r = call double @llvm.fma.f64(double %x, double %y, double 0x7ff8000000000002) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_op0_op1(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_op0_op1( |
| ; CHECK-NEXT: ret double 0x7FF8000000001234 |
| ; |
| %r = call double @llvm.fmuladd.f64(double 0x7ff8000000001234, double 0x7ff800000000dead, double %x) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_op0_op2(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_op0_op2( |
| ; CHECK-NEXT: ret double 0x7FF8000000005678 |
| ; |
| %r = call double @llvm.fmuladd.f64(double 0x7ff8000000005678, double %x, double 0x7ff800000000dead) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_op1_op2(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_op1_op2( |
| ; CHECK-NEXT: ret double 0x7FF80000AAAAAAAA |
| ; |
| %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff80000aaaaaaaa, double 0x7ff800000000dead) |
| ret double %r |
| } |
| |
| define double @fma_nan_multiplicand_inf_zero(double %x) { |
| ; CHECK-LABEL: @fma_nan_multiplicand_inf_zero( |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double 0.000000e+00, double [[X:%.*]]) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double 0.0, double %x) |
| ret double %r |
| } |
| |
| define double @fma_nan_multiplicand_zero_inf(double %x) { |
| ; CHECK-LABEL: @fma_nan_multiplicand_zero_inf( |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]]) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %r = call double @llvm.fma.f64(double 0.0, double 0x7ff0000000000000, double %x) |
| ret double %r |
| } |
| |
| define double @fma_nan_addend_inf_neginf(double %x, i32 %y) { |
| ; CHECK-LABEL: @fma_nan_addend_inf_neginf( |
| ; CHECK-NEXT: [[NOTNAN:%.*]] = uitofp i32 [[Y:%.*]] to double |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %notnan = uitofp i32 %y to double |
| %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000) |
| ret double %r |
| } |
| |
| define double @fma_nan_addend_neginf_inf(double %x, i1 %y) { |
| ; CHECK-LABEL: @fma_nan_addend_neginf_inf( |
| ; CHECK-NEXT: [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01 |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %notnan = select i1 %y, double 42.0, double -0.1 |
| %r = call double @llvm.fma.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_multiplicand_neginf_zero(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_multiplicand_neginf_zero( |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double 0xFFF0000000000000, double 0.000000e+00, double [[X:%.*]]) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %r = call double @llvm.fmuladd.f64(double 0xfff0000000000000, double 0.0, double %x) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_multiplicand_negzero_inf(double %x) { |
| ; CHECK-LABEL: @fmuladd_nan_multiplicand_negzero_inf( |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double -0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]]) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %r = call double @llvm.fmuladd.f64(double -0.0, double 0x7ff0000000000000, double %x) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_addend_inf_neginf(double %x, i32 %y) { |
| ; CHECK-LABEL: @fmuladd_nan_addend_inf_neginf( |
| ; CHECK-NEXT: [[NOTNAN:%.*]] = sitofp i32 [[Y:%.*]] to double |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %notnan = sitofp i32 %y to double |
| %r = call double @llvm.fmuladd.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000) |
| ret double %r |
| } |
| |
| define double @fmuladd_nan_addend_neginf_inf(double %x, i1 %y) { |
| ; CHECK-LABEL: @fmuladd_nan_addend_neginf_inf( |
| ; CHECK-NEXT: [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01 |
| ; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000) |
| ; CHECK-NEXT: ret double [[R]] |
| ; |
| %notnan = select i1 %y, double 42.0, double -0.1 |
| %r = call double @llvm.fmuladd.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000) |
| ret double %r |
| } |
| |
| declare float @llvm.copysign.f32(float, float) |
| declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) |
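
; copysign takes the magnitude of the first operand and the sign of the second, so
; copysign(x, x) == x, copysign(x, -x) == -x, and copysign(-x, x) == x.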
| |
| define float @copysign_same_operand(float %x) { |
| ; CHECK-LABEL: @copysign_same_operand( |
| ; CHECK-NEXT: ret float [[X:%.*]] |
| ; |
| %r = call float @llvm.copysign.f32(float %x, float %x) |
| ret float %r |
| } |
| |
| define <2 x double> @copysign_same_operand_vec(<2 x double> %x) { |
| ; CHECK-LABEL: @copysign_same_operand_vec( |
| ; CHECK-NEXT: ret <2 x double> [[X:%.*]] |
| ; |
| %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %x) |
| ret <2 x double> %r |
| } |
| |
| define float @negated_sign_arg(float %x) { |
| ; CHECK-LABEL: @negated_sign_arg( |
| ; CHECK-NEXT: [[NEGX:%.*]] = fsub ninf float -0.000000e+00, [[X:%.*]] |
| ; CHECK-NEXT: ret float [[NEGX]] |
| ; |
| %negx = fsub ninf float -0.0, %x |
| %r = call arcp float @llvm.copysign.f32(float %x, float %negx) |
| ret float %r |
| } |
| |
| define <2 x double> @negated_sign_arg_vec(<2 x double> %x) { |
| ; CHECK-LABEL: @negated_sign_arg_vec( |
| ; CHECK-NEXT: [[NEGX:%.*]] = fneg afn <2 x double> [[X:%.*]] |
| ; CHECK-NEXT: ret <2 x double> [[NEGX]] |
| ; |
| %negx = fneg afn <2 x double> %x |
| %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %negx) |
| ret <2 x double> %r |
| } |
| |
| define float @negated_mag_arg(float %x) { |
| ; CHECK-LABEL: @negated_mag_arg( |
| ; CHECK-NEXT: ret float [[X:%.*]] |
| ; |
| %negx = fneg nnan float %x |
| %r = call ninf float @llvm.copysign.f32(float %negx, float %x) |
| ret float %r |
| } |
| |
| define <2 x double> @negated_mag_arg_vec(<2 x double> %x) { |
| ; CHECK-LABEL: @negated_mag_arg_vec( |
| ; CHECK-NEXT: ret <2 x double> [[X:%.*]] |
| ; |
| %negx = fneg afn <2 x double> %x |
| %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %negx, <2 x double> %x) |
| ret <2 x double> %r |
| } |
| |
| ; We handle the "returned" attribute only in InstCombine, because the fact |
| ; that this simplification may replace one call with another may cause issues |
| ; for call graph passes. |
| |
| declare i32 @passthru_i32(i32 returned) |
| declare ptr @passthru_p8(ptr returned) |
| |
| define i32 @returned_const_int_arg() { |
| ; CHECK-LABEL: @returned_const_int_arg( |
| ; CHECK-NEXT: [[X:%.*]] = call i32 @passthru_i32(i32 42) |
| ; CHECK-NEXT: ret i32 [[X]] |
| ; |
| %x = call i32 @passthru_i32(i32 42) |
| ret i32 %x |
| } |
| |
| define ptr @returned_const_ptr_arg() { |
| ; CHECK-LABEL: @returned_const_ptr_arg( |
| ; CHECK-NEXT: [[X:%.*]] = call ptr @passthru_p8(ptr null) |
| ; CHECK-NEXT: ret ptr [[X]] |
| ; |
| %x = call ptr @passthru_p8(ptr null) |
| ret ptr %x |
| } |
| |
| define i32 @returned_var_arg(i32 %arg) { |
| ; CHECK-LABEL: @returned_var_arg( |
| ; CHECK-NEXT: [[X:%.*]] = call i32 @passthru_i32(i32 [[ARG:%.*]]) |
| ; CHECK-NEXT: ret i32 [[X]] |
| ; |
| %x = call i32 @passthru_i32(i32 %arg) |
| ret i32 %x |
| } |
| |
| define i32 @returned_const_int_arg_musttail(i32 %arg) { |
| ; CHECK-LABEL: @returned_const_int_arg_musttail( |
| ; CHECK-NEXT: [[X:%.*]] = musttail call i32 @passthru_i32(i32 42) |
| ; CHECK-NEXT: ret i32 [[X]] |
| ; |
| %x = musttail call i32 @passthru_i32(i32 42) |
| ret i32 %x |
| } |
| |
| define i32 @returned_var_arg_musttail(i32 %arg) { |
| ; CHECK-LABEL: @returned_var_arg_musttail( |
| ; CHECK-NEXT: [[X:%.*]] = musttail call i32 @passthru_i32(i32 [[ARG:%.*]]) |
| ; CHECK-NEXT: ret i32 [[X]] |
| ; |
| %x = musttail call i32 @passthru_i32(i32 %arg) |
| ret i32 %x |
| } |
| |
| define i32 @call_undef_musttail() { |
| ; CHECK-LABEL: @call_undef_musttail( |
| ; CHECK-NEXT: [[X:%.*]] = musttail call i32 undef() |
| ; CHECK-NEXT: ret i32 [[X]] |
| ; |
| %x = musttail call i32 undef() |
| ret i32 %x |
| } |
| |
| ; This is not the builtin fmax, so we don't know anything about its behavior. |
| |
| declare float @fmaxf(float, float) |
| |
| define float @nobuiltin_fmax() { |
| ; CHECK-LABEL: @nobuiltin_fmax( |
| ; CHECK-NEXT: [[M:%.*]] = call float @fmaxf(float 0.000000e+00, float 1.000000e+00) #[[ATTR3:[0-9]+]] |
| ; CHECK-NEXT: [[R:%.*]] = call float @llvm.fabs.f32(float [[M]]) |
| ; CHECK-NEXT: ret float [[R]] |
| ; |
| %m = call float @fmaxf(float 0.0, float 1.0) #0 |
| %r = call float @llvm.fabs.f32(float %m) |
| ret float %r |
| } |
| |
| |
| declare i32 @llvm.ctpop.i32(i32) |
| declare <3 x i33> @llvm.ctpop.v3i33(<3 x i33>) |
| declare i1 @llvm.ctpop.i1(i1) |
| declare i1 @llvm.ctlz.i1(i1, i1) |
| declare i1 @llvm.cttz.i1(i1, i1) |
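
; After masking to the low bit, the value is 0 or 1, so its population count is the value itself.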
| |
| define i32 @ctpop_lowbit(i32 %x) { |
| ; CHECK-LABEL: @ctpop_lowbit( |
| ; CHECK-NEXT: [[B:%.*]] = and i32 [[X:%.*]], 1 |
| ; CHECK-NEXT: ret i32 [[B]] |
| ; |
| %b = and i32 %x, 1 |
| %r = call i32 @llvm.ctpop.i32(i32 %b) |
| ret i32 %r |
| } |
| |
| ; Negative test - only low bit allowed |
| ; This could be reduced by instcombine to and+shift. |
| |
| define i32 @ctpop_pow2(i32 %x) { |
| ; CHECK-LABEL: @ctpop_pow2( |
| ; CHECK-NEXT: [[B:%.*]] = and i32 [[X:%.*]], 4 |
| ; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.ctpop.i32(i32 [[B]]) |
| ; CHECK-NEXT: ret i32 [[R]] |
| ; |
| %b = and i32 %x, 4 |
| %r = call i32 @llvm.ctpop.i32(i32 %b) |
| ret i32 %r |
| } |
| |
| define <3 x i33> @ctpop_signbit(<3 x i33> %x) { |
| ; CHECK-LABEL: @ctpop_signbit( |
| ; CHECK-NEXT: [[B:%.*]] = lshr <3 x i33> [[X:%.*]], <i33 32, i33 32, i33 32> |
| ; CHECK-NEXT: ret <3 x i33> [[B]] |
| ; |
| %b = lshr <3 x i33> %x, <i33 32, i33 32, i33 32> |
| %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b) |
| ret <3 x i33> %r |
| } |
| |
; Negative test - more than one bit can be set after shifting by 31, so the fold does not apply
| |
| define <3 x i33> @ctpop_notsignbit(<3 x i33> %x) { |
| ; CHECK-LABEL: @ctpop_notsignbit( |
| ; CHECK-NEXT: [[B:%.*]] = lshr <3 x i33> [[X:%.*]], <i33 31, i33 31, i33 31> |
| ; CHECK-NEXT: [[R:%.*]] = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> [[B]]) |
| ; CHECK-NEXT: ret <3 x i33> [[R]] |
| ; |
| %b = lshr <3 x i33> %x, <i33 31, i33 31, i33 31> |
| %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b) |
| ret <3 x i33> %r |
| } |
| |
| define i1 @ctpop_bool(i1 %x) { |
| ; CHECK-LABEL: @ctpop_bool( |
| ; CHECK-NEXT: ret i1 [[X:%.*]] |
| ; |
| %r = tail call i1 @llvm.ctpop.i1(i1 %x) |
| ret i1 %r |
| } |
| |
| declare i32 @llvm.cttz.i32(i32, i1) |
| declare <3 x i33> @llvm.cttz.v3i33(<3 x i33>, i1) |
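
; shl 1 by x produces a value whose only set bit is at position x, so cttz of it is x.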
| |
| define i32 @cttz_shl1(i32 %x) { |
| ; CHECK-LABEL: @cttz_shl1( |
| ; CHECK-NEXT: ret i32 [[X:%.*]] |
| ; |
| %s = shl i32 1, %x |
| %r = call i32 @llvm.cttz.i32(i32 %s, i1 true) |
| ret i32 %r |
| } |
| |
| define <3 x i33> @cttz_shl1_vec(<3 x i33> %x) { |
| ; CHECK-LABEL: @cttz_shl1_vec( |
| ; CHECK-NEXT: ret <3 x i33> [[X:%.*]] |
| ; |
| %s = shl <3 x i33> <i33 1, i33 1, i33 undef>, %x |
| %r = call <3 x i33> @llvm.cttz.v3i33(<3 x i33> %s, i1 false) |
| ret <3 x i33> %r |
| } |
| |
| ; Negative test - this could be generalized in instcombine though. |
| |
| define i32 @cttz_shl_not_low_bit(i32 %x) { |
| ; CHECK-LABEL: @cttz_shl_not_low_bit( |
| ; CHECK-NEXT: [[S:%.*]] = shl i32 2, [[X:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.cttz.i32(i32 [[S]], i1 true) |
| ; CHECK-NEXT: ret i32 [[R]] |
| ; |
| %s = shl i32 2, %x |
| %r = call i32 @llvm.cttz.i32(i32 %s, i1 true) |
| ret i32 %r |
| } |
| |
| declare i32 @llvm.ctlz.i32(i32, i1) |
| declare <3 x i33> @llvm.ctlz.v3i33(<3 x i33>, i1) |
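
; lshr of a value with the sign bit set leaves the highest set bit at position bitwidth-1-x,
; so ctlz of the result equals the shift amount x.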
| |
| define i32 @ctlz_lshr_sign_bit(i32 %x) { |
| ; CHECK-LABEL: @ctlz_lshr_sign_bit( |
| ; CHECK-NEXT: ret i32 [[X:%.*]] |
| ; |
| %s = lshr i32 2147483648, %x |
| %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true) |
| ret i32 %r |
| } |
| |
| define i32 @ctlz_lshr_negative(i32 %x) { |
| ; CHECK-LABEL: @ctlz_lshr_negative( |
| ; CHECK-NEXT: ret i32 [[X:%.*]] |
| ; |
| %s = lshr i32 -42, %x |
| %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true) |
| ret i32 %r |
| } |
| |
| define <3 x i33> @ctlz_lshr_sign_bit_vec(<3 x i33> %x) { |
| ; CHECK-LABEL: @ctlz_lshr_sign_bit_vec( |
| ; CHECK-NEXT: ret <3 x i33> [[X:%.*]] |
| ; |
| %s = lshr <3 x i33> <i33 undef, i33 4294967296, i33 4294967296>, %x |
| %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 false) |
| ret <3 x i33> %r |
| } |
| |
| ; Negative test - this could be generalized in instcombine though. |
| |
| define i32 @ctlz_lshr_not_negative(i32 %x) { |
| ; CHECK-LABEL: @ctlz_lshr_not_negative( |
| ; CHECK-NEXT: [[S:%.*]] = lshr i32 42, [[X:%.*]] |
| ; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.ctlz.i32(i32 [[S]], i1 true) |
| ; CHECK-NEXT: ret i32 [[R]] |
| ; |
| %s = lshr i32 42, %x |
| %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true) |
| ret i32 %r |
| } |
| |
| define i32 @ctlz_ashr_sign_bit(i32 %x) { |
| ; CHECK-LABEL: @ctlz_ashr_sign_bit( |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| %s = ashr i32 2147483648, %x |
| %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false) |
| ret i32 %r |
| } |
| |
| define i32 @ctlz_ashr_negative(i32 %x) { |
| ; CHECK-LABEL: @ctlz_ashr_negative( |
| ; CHECK-NEXT: ret i32 0 |
| ; |
| %s = ashr i32 -42, %x |
| %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false) |
| ret i32 %r |
| } |
| |
| define <3 x i33> @ctlz_ashr_sign_bit_vec(<3 x i33> %x) { |
| ; CHECK-LABEL: @ctlz_ashr_sign_bit_vec( |
| ; CHECK-NEXT: ret <3 x i33> zeroinitializer |
| ; |
| %s = ashr <3 x i33> <i33 4294967296, i33 undef, i33 4294967296>, %x |
| %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 true) |
| ret <3 x i33> %r |
| } |
| |
declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
| |
| define i1 @capture_vs_recurse(i64 %mask) { |
| ; CHECK-LABEL: @capture_vs_recurse( |
| ; CHECK-NEXT: [[A:%.*]] = call noalias ptr @malloc(i64 8) |
| ; CHECK-NEXT: [[B:%.*]] = call nonnull ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 [[MASK:%.*]]) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[A]], [[B]] |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %a = call noalias ptr @malloc(i64 8) |
| %b = call nonnull ptr @llvm.ptrmask.p0.i64(ptr %a, i64 %mask) |
| %cmp = icmp eq ptr %a, %b |
| ret i1 %cmp |
| } |
| |
| define i1 @ctlz_i1_non_poison_eq_false(i1 %x) { |
| ; CHECK-LABEL: @ctlz_i1_non_poison_eq_false( |
| ; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.ctlz.i1(i1 [[X:%.*]], i1 false) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %ct = call i1 @llvm.ctlz.i1(i1 %x, i1 false) |
| %cmp = icmp eq i1 %ct, false |
| ret i1 %cmp |
| } |
| |
| define i1 @ctlz_i1_poison_eq_false(i1 %x) { |
| ; CHECK-LABEL: @ctlz_i1_poison_eq_false( |
| ; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.ctlz.i1(i1 [[X:%.*]], i1 true) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %ct = call i1 @llvm.ctlz.i1(i1 %x, i1 true) |
| %cmp = icmp eq i1 %ct, false |
| ret i1 %cmp |
| } |
| |
| define i1 @cttz_i1_non_poison_eq_false(i1 %x) { |
| ; CHECK-LABEL: @cttz_i1_non_poison_eq_false( |
| ; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.cttz.i1(i1 [[X:%.*]], i1 false) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %ct = call i1 @llvm.cttz.i1(i1 %x, i1 false) |
| %cmp = icmp eq i1 %ct, false |
| ret i1 %cmp |
| } |
| |
| define i1 @cttz_i1_poison_eq_false(i1 %x) { |
| ; CHECK-LABEL: @cttz_i1_poison_eq_false( |
| ; CHECK-NEXT: [[CT:%.*]] = call i1 @llvm.cttz.i1(i1 [[X:%.*]], i1 true) |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[CT]], false |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %ct = call i1 @llvm.cttz.i1(i1 %x, i1 true) |
| %cmp = icmp eq i1 %ct, false |
| ret i1 %cmp |
| } |
| |
| define i1 @ctpop_i1_non_poison_eq_false(i1 %x) { |
| ; CHECK-LABEL: @ctpop_i1_non_poison_eq_false( |
| ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i1 [[X:%.*]], false |
| ; CHECK-NEXT: ret i1 [[CMP]] |
| ; |
| %ct = call i1 @llvm.ctpop.i1(i1 %x) |
| %cmp = icmp eq i1 %ct, false |
| ret i1 %cmp |
| } |
| |
| attributes #0 = { nobuiltin readnone } |