; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming < %s | FileCheck %s
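
; The tests below check that a partial reduction of a widening i16
; multiply (zext/sext to i32, multiply, llvm.vector.partial.reduce.add)
; is lowered to the two-way UDOT/SDOT instructions (32-bit accumulator
; lanes, 16-bit sources) provided by SVE2.1, and by SME2 in streaming
; mode.

; Scalable vectors: the unsigned widening product should select UDOT.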
define <vscale x 4 x i32> @udot_vl128(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: udot_vl128:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: udot z0.s, z1.h, z2.h
; CHECK-NEXT: ret
entry:
%a.wide = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
%b.wide = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
%mult = mul nuw nsw <vscale x 8 x i32> %a.wide, %b.wide
%partial.reduce = tail call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %mult)
ret <vscale x 4 x i32> %partial.reduce
}
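
; As above with sign-extended operands, selecting SDOT.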
define <vscale x 4 x i32> @sdot_vl128(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sdot_vl128:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sdot z0.s, z1.h, z2.h
; CHECK-NEXT: ret
entry:
%a.wide = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
%b.wide = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
%mult = mul nuw nsw <vscale x 8 x i32> %a.wide, %b.wide
%partial.reduce = tail call <vscale x 4 x i32> @llvm.vector.partial.reduce.add(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %mult)
ret <vscale x 4 x i32> %partial.reduce
}
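
; vscale_range(2,2) pins the vector length at 256 bits, so the
; fixed-length <16 x i16> and <8 x i32> vectors each fill exactly one
; Z register and the reduction can use a single UDOT between plain
; whole-register ldr/str.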
define void @udot_vl256(ptr %accptr, ptr %aptr, ptr %bptr) vscale_range(2,2) {
; CHECK-LABEL: udot_vl256:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: ldr z2, [x2]
; CHECK-NEXT: udot z0.s, z1.h, z2.h
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
entry:
%acc = load <8 x i32>, ptr %accptr
%a = load <16 x i16>, ptr %aptr
%b = load <16 x i16>, ptr %bptr
%a.wide = zext <16 x i16> %a to <16 x i32>
%b.wide = zext <16 x i16> %b to <16 x i32>
%mult = mul nuw nsw <16 x i32> %a.wide, %b.wide
%partial.reduce = tail call <8 x i32> @llvm.vector.partial.reduce.add(<8 x i32> %acc, <16 x i32> %mult)
store <8 x i32> %partial.reduce, ptr %accptr
ret void
}
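
; Signed variant of udot_vl256, selecting SDOT.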
define void @sdot_vl256(ptr %accptr, ptr %aptr, ptr %bptr) vscale_range(2,2) {
; CHECK-LABEL: sdot_vl256:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: ldr z2, [x2]
; CHECK-NEXT: sdot z0.s, z1.h, z2.h
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
entry:
%acc = load <8 x i32>, ptr %accptr
%a = load <16 x i16>, ptr %aptr
%b = load <16 x i16>, ptr %bptr
%a.wide = sext <16 x i16> %a to <16 x i32>
%b.wide = sext <16 x i16> %b to <16 x i32>
%mult = mul nuw nsw <16 x i32> %a.wide, %b.wide
%partial.reduce = tail call <8 x i32> @llvm.vector.partial.reduce.add(<8 x i32> %acc, <16 x i32> %mult)
store <8 x i32> %partial.reduce, ptr %accptr
ret void
}
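
; 128-bit fixed-length vectors: the dot product is performed on the
; Z-register views of the incoming Q registers, so apart from the
; "kill" register-class annotations (which emit no instructions) a
; single UDOT suffices.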
define <4 x i32> @fixed_udot_s_h(<4 x i32> %acc, <8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: fixed_udot_s_h:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: udot z0.s, z1.h, z2.h
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
entry:
%a.wide = zext <8 x i16> %a to <8 x i32>
%b.wide = zext <8 x i16> %b to <8 x i32>
%mult = mul nuw nsw <8 x i32> %a.wide, %b.wide
%partial.reduce = tail call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %mult)
ret <4 x i32> %partial.reduce
}
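
; Signed variant of fixed_udot_s_h, selecting SDOT.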
define <4 x i32> @fixed_sdot_s_h(<4 x i32> %acc, <8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: fixed_sdot_s_h:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: sdot z0.s, z1.h, z2.h
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
entry:
%a.wide = sext <8 x i16> %a to <8 x i32>
%b.wide = sext <8 x i16> %b to <8 x i32>
%mult = mul nuw nsw <8 x i32> %a.wide, %b.wide
%partial.reduce = tail call <4 x i32> @llvm.vector.partial.reduce.add(<4 x i32> %acc, <8 x i32> %mult)
ret <4 x i32> %partial.reduce
}