; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=mvp -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -tail-dup-placement=0 | FileCheck %s

; Test memcpy, memmove, and memset intrinsics.

target triple = "wasm32-unknown-unknown"

declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1)

; Test that return values are optimized.
define ptr @copy_yes(ptr %dst, ptr %src, i32 %len) {
; CHECK-LABEL: copy_yes:
; CHECK: .functype copy_yes (i32, i32, i32) -> (i32)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: call $push0=, memcpy, $0, $1, $2
; CHECK-NEXT: return $pop0
call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
ret ptr %dst
}

define void @copy_no(ptr %dst, ptr %src, i32 %len) {
; CHECK-LABEL: copy_no:
; CHECK: .functype copy_no (i32, i32, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: call $drop=, memcpy, $0, $1, $2
; CHECK-NEXT: return
call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
ret void
}

define ptr @move_yes(ptr %dst, ptr %src, i32 %len) {
; CHECK-LABEL: move_yes:
; CHECK: .functype move_yes (i32, i32, i32) -> (i32)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: call $push0=, memmove, $0, $1, $2
; CHECK-NEXT: return $pop0
call void @llvm.memmove.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
ret ptr %dst
}

define void @move_no(ptr %dst, ptr %src, i32 %len) {
; CHECK-LABEL: move_no:
; CHECK: .functype move_no (i32, i32, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: call $drop=, memmove, $0, $1, $2
; CHECK-NEXT: return
call void @llvm.memmove.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
ret void
}

define ptr @set_yes(ptr %dst, i8 %src, i32 %len) {
; CHECK-LABEL: set_yes:
; CHECK: .functype set_yes (i32, i32, i32) -> (i32)
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: call $push0=, memset, $0, $1, $2
; CHECK-NEXT: return $pop0
call void @llvm.memset.p0.i32(ptr %dst, i8 %src, i32 %len, i1 false)
ret ptr %dst
}

define void @set_no(ptr %dst, i8 %src, i32 %len) {
; CHECK-LABEL: set_no:
; CHECK: .functype set_no (i32, i32, i32) -> ()
; CHECK-NEXT: # %bb.0:
; CHECK-NEXT: call $drop=, memset, $0, $1, $2
; CHECK-NEXT: return
call void @llvm.memset.p0.i32(ptr %dst, i8 %src, i32 %len, i1 false)
ret void
}

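; Test memset with destination operands that are frame indices (stack allocas).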
define void @frame_index() {
; CHECK-LABEL: frame_index:
; CHECK: .functype frame_index () -> ()
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: global.get $push3=, __stack_pointer
; CHECK-NEXT: i32.const $push4=, 4096
; CHECK-NEXT: i32.sub $push12=, $pop3, $pop4
; CHECK-NEXT: local.tee $push11=, $0=, $pop12
; CHECK-NEXT: global.set __stack_pointer, $pop11
; CHECK-NEXT: i32.const $push7=, 2048
; CHECK-NEXT: i32.add $push8=, $0, $pop7
; CHECK-NEXT: i32.const $push1=, 0
; CHECK-NEXT: i32.const $push0=, 1024
; CHECK-NEXT: call $drop=, memset, $pop8, $pop1, $pop0
; CHECK-NEXT: i32.const $push10=, 0
; CHECK-NEXT: i32.const $push9=, 1024
; CHECK-NEXT: call $push2=, memset, $0, $pop10, $pop9
; CHECK-NEXT: i32.const $push5=, 4096
; CHECK-NEXT: i32.add $push6=, $pop2, $pop5
; CHECK-NEXT: global.set __stack_pointer, $pop6
; CHECK-NEXT: return
entry:
%a = alloca [2048 x i8], align 16
%b = alloca [2048 x i8], align 16
call void @llvm.memset.p0.i32(ptr align 16 %a, i8 256, i32 1024, i1 false)
call void @llvm.memset.p0.i32(ptr align 16 %b, i8 256, i32 1024, i1 false)
ret void
}

; If the result value of memset doesn't get stackified, it should be marked
; $drop. Note that we use a call to @block_tail_dup to prevent tail
; duplication so that we can test this specific functionality.
declare ptr @def()
declare void @block_tail_dup()
define ptr @drop_result(ptr %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
; CHECK-LABEL: drop_result:
; CHECK: .functype drop_result (i32, i32, i32, i32, i32) -> (i32)
; CHECK-NEXT: # %bb.0: # %bb
; CHECK-NEXT: block
; CHECK-NEXT: block
; CHECK-NEXT: br_if 0, $3 # 0: down to label1
; CHECK-NEXT: # %bb.1: # %bb5
; CHECK-NEXT: br_if 1, $4 # 1: down to label0
; CHECK-NEXT: # %bb.2: # %bb7
; CHECK-NEXT: call $drop=, memset, $0, $1, $2
; CHECK-NEXT: call block_tail_dup
; CHECK-NEXT: return $0
; CHECK-NEXT: .LBB7_3: # %bb9
; CHECK-NEXT: end_block # label1:
; CHECK-NEXT: call $0=, def
; CHECK-NEXT: .LBB7_4: # %bb11
; CHECK-NEXT: end_block # label0:
; CHECK-NEXT: call block_tail_dup
; CHECK-NEXT: return $0
bb:
%tmp = icmp eq i32 %arg3, 0
br i1 %tmp, label %bb5, label %bb9
bb5:
%tmp6 = icmp eq i32 %arg4, 0
br i1 %tmp6, label %bb7, label %bb8
bb7:
call void @llvm.memset.p0.i32(ptr %arg, i8 %arg1, i32 %arg2, i1 false)
br label %bb11
bb8:
br label %bb11
bb9:
%tmp10 = call ptr @def()
br label %bb11
bb11:
%tmp12 = phi ptr [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
call void @block_tail_dup()
ret ptr %tmp12
}

; This is the same as drop_result, except we let tail dup happen, so the
; result of the memset *is* stackified.
define ptr @tail_dup_to_reuse_result(ptr %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
; CHECK-LABEL: tail_dup_to_reuse_result:
; CHECK: .functype tail_dup_to_reuse_result (i32, i32, i32, i32, i32) -> (i32)
; CHECK-NEXT: # %bb.0: # %bb
; CHECK-NEXT: block
; CHECK-NEXT: block
; CHECK-NEXT: br_if 0, $3 # 0: down to label3
; CHECK-NEXT: # %bb.1: # %bb5
; CHECK-NEXT: br_if 1, $4 # 1: down to label2
; CHECK-NEXT: # %bb.2: # %bb7
; CHECK-NEXT: call $push0=, memset, $0, $1, $2
; CHECK-NEXT: return $pop0
; CHECK-NEXT: .LBB8_3: # %bb9
; CHECK-NEXT: end_block # label3:
; CHECK-NEXT: call $0=, def
; CHECK-NEXT: .LBB8_4: # %bb11
; CHECK-NEXT: end_block # label2:
; CHECK-NEXT: return $0
bb:
%tmp = icmp eq i32 %arg3, 0
br i1 %tmp, label %bb5, label %bb9
bb5:
%tmp6 = icmp eq i32 %arg4, 0
br i1 %tmp6, label %bb7, label %bb8
bb7:
call void @llvm.memset.p0.i32(ptr %arg, i8 %arg1, i32 %arg2, i1 false)
br label %bb11
bb8:
br label %bb11
bb9:
%tmp10 = call ptr @def()
br label %bb11
bb11:
%tmp12 = phi ptr [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
ret ptr %tmp12
}