; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -mtriple=aarch64 -mattr=+v8.3a,+sm4 -o - | FileCheck %s

; SM3PARTW1 (message expansion, part 1): the three-operand intrinsic must
; lower to a single sm3partw1 instruction with no extra moves.
define <4 x i32> @test_vsm3partw1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3partw1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3partw1 v0.4s, v1.4s, v2.4s
; CHECK-NEXT:    ret
entry:
  %vsm3partw1.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3partw1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
  ret <4 x i32> %vsm3partw1.i
}

; SM3PARTW2 (message expansion, part 2): lowers to a single sm3partw2.
define <4 x i32> @test_vsm3partw2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3partw2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3partw2 v0.4s, v1.4s, v2.4s
; CHECK-NEXT:    ret
entry:
  %vsm3partw2.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3partw2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
  ret <4 x i32> %vsm3partw2.i
}

; SM3SS1: four-register form (destination is also a source operand).
define <4 x i32> @test_vsm3ss1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3ss1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3ss1 v0.4s, v0.4s, v1.4s, v2.4s
; CHECK-NEXT:    ret
entry:
  %vsm3ss1.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3ss1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
  ret <4 x i32> %vsm3ss1.i
}

; SM3TT1A: the immediate lane argument (i64 2) must become the v2.s[2]
; lane selector on the instruction.
define <4 x i32> @test_vsm3tt1a(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3tt1a:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3tt1a v0.4s, v1.4s, v2.s[2]
; CHECK-NEXT:    ret
entry:
  %vsm3tt1a.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3tt1a(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, i64 2)
  ret <4 x i32> %vsm3tt1a.i
}

; SM3TT1B: immediate lane argument becomes the v2.s[2] lane selector.
define <4 x i32> @test_vsm3tt1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3tt1b:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3tt1b v0.4s, v1.4s, v2.s[2]
; CHECK-NEXT:    ret
entry:
  %vsm3tt1b.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3tt1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, i64 2)
  ret <4 x i32> %vsm3tt1b.i
}

; SM3TT2A: immediate lane argument becomes the v2.s[2] lane selector.
define <4 x i32> @test_vsm3tt2a(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3tt2a:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3tt2a v0.4s, v1.4s, v2.s[2]
; CHECK-NEXT:    ret
entry:
  %vsm3tt2a.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3tt2a(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, i64 2)
  ret <4 x i32> %vsm3tt2a.i
}

; SM3TT2B: immediate lane argument becomes the v2.s[2] lane selector.
define <4 x i32> @test_vsm3tt2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: test_vsm3tt2b:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm3tt2b v0.4s, v1.4s, v2.s[2]
; CHECK-NEXT:    ret
entry:
  %vsm3tt2b.i = tail call <4 x i32> @llvm.aarch64.crypto.sm3tt2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, i64 2)
  ret <4 x i32> %vsm3tt2b.i
}

; SM4E (round encryption): two-operand form, destination is also a source.
define <4 x i32> @test_vsm4e(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vsm4e:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm4e v0.4s, v1.4s
; CHECK-NEXT:    ret
entry:
  %vsm4e.i = tail call <4 x i32> @llvm.aarch64.crypto.sm4e(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %vsm4e.i
}

; SM4EKEY (key expansion): three-register form.
define <4 x i32> @test_vsm4ekey(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vsm4ekey:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sm4ekey v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
entry:
  %vsm4ekey.i = tail call <4 x i32> @llvm.aarch64.crypto.sm4ekey(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %vsm4ekey.i
}

; Declarations of the SM3/SM4 crypto intrinsics exercised above, listed in
; the same order as the test functions (declaration order is not significant
; to LLVM, but keeping it sorted aids maintenance).
declare <4 x i32> @llvm.aarch64.crypto.sm3partw1(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.aarch64.crypto.sm3partw2(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.aarch64.crypto.sm3ss1(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.aarch64.crypto.sm3tt1a(<4 x i32>, <4 x i32>, <4 x i32>, i64 immarg)
declare <4 x i32> @llvm.aarch64.crypto.sm3tt1b(<4 x i32>, <4 x i32>, <4 x i32>, i64 immarg)
declare <4 x i32> @llvm.aarch64.crypto.sm3tt2a(<4 x i32>, <4 x i32>, <4 x i32>, i64 immarg)
declare <4 x i32> @llvm.aarch64.crypto.sm3tt2b(<4 x i32>, <4 x i32>, <4 x i32>, i64 immarg)
declare <4 x i32> @llvm.aarch64.crypto.sm4e(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.aarch64.crypto.sm4ekey(<4 x i32>, <4 x i32>)