| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: opt -S -passes=instcombine < %s | FileCheck %s |
| target triple = "aarch64-unknown-linux-gnu" |
| |
; cmpeq with an all-false (zeroinitializer) governing predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpeq(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpeq(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpeq.wide (i8 elements vs. i64 wide operand) with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpeq_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpeq_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpge with an all-false (zeroinitializer) governing predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpge(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpge(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpge.wide with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpge_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpge_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpgt with an all-false (zeroinitializer) governing predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpgt(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpgt(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpgt.wide with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpgt_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpgt_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmphi (unsigned higher) with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmphi(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmphi(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmphi.wide with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmphi_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmphi_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmphs (unsigned higher-or-same) with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmphs(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmphs(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmphs.wide with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmphs_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmphs_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmple.wide with an all-false predicate folds to an all-false result.
; (Only the wide form exists as an intrinsic; the non-wide cmple is expressed via cmpge.)
define <vscale x 16 x i1> @test_cmple_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmple_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmplo.wide (unsigned lower) with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmplo_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmplo_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpls.wide (unsigned lower-or-same) with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpls_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpls_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmplt.wide with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmplt_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmplt_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpne with an all-false (zeroinitializer) governing predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpne(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpne(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 16 x i8> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 16 x i1> %0
}
| |
; cmpne.wide with an all-false predicate folds to an all-false result.
define <vscale x 16 x i1> @test_cmpne_wide(<vscale x 16 x i8> %a, <vscale x 2 x i64> %b){
; CHECK-LABEL: define <vscale x 16 x i1> @test_cmpne_wide(
; CHECK-SAME: <vscale x 16 x i8> [[A:%.*]], <vscale x 2 x i64> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 16 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
ret <vscale x 16 x i1> %0
}
| |
; facge (floating-point absolute compare >=) with an all-false predicate folds to an all-false result.
; NOTE(review): restored the truncated "entry:" label (it had been mangled to "ry:"
; spliced onto the define line) and the matching autogenerated CHECK capture, so this
; test mirrors its siblings.
define <vscale x 8 x i1> @test_facge(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_facge(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.facge.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |
; facgt (floating-point absolute compare >) with an all-false predicate folds to an all-false result.
; NOTE(review): restored the truncated "entry:" label (it had been mangled to "ry:"
; spliced onto the define line) and the matching autogenerated CHECK capture, so this
; test mirrors its siblings.
define <vscale x 8 x i1> @test_facgt(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_facgt(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.facgt.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |
; fcmpeq with an all-false predicate folds to an all-false result.
; Intrinsic suffix corrected to .nxv8f16: the mangled name must match the
; <vscale x 8 x half> operand type (.nxv16i8 is the i8 integer-compare mangling
; and is rejected by the IR verifier for these operands).
define <vscale x 8 x i1> @test_fcmpeq(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_fcmpeq(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpeq.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |
; fcmpge with an all-false predicate folds to an all-false result.
; Intrinsic suffix corrected to .nxv8f16 to match the <vscale x 8 x half> operands
; (was the incorrect .nxv16i8 integer mangling).
define <vscale x 8 x i1> @test_fcmpge(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_fcmpge(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpge.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |
; fcmpgt with an all-false predicate folds to an all-false result.
; Intrinsic suffix corrected to .nxv8f16 to match the <vscale x 8 x half> operands
; (was the incorrect .nxv16i8 integer mangling).
define <vscale x 8 x i1> @test_fcmpgt(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_fcmpgt(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpgt.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |
; fcmpne with an all-false predicate folds to an all-false result.
; Intrinsic suffix corrected to .nxv8f16 to match the <vscale x 8 x half> operands
; (was the incorrect .nxv16i8 integer mangling).
define <vscale x 8 x i1> @test_fcmpne(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_fcmpne(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpne.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |
; fcmpuo (unordered compare) with an all-false predicate folds to an all-false result.
; Intrinsic suffix corrected to .nxv8f16 to match the <vscale x 8 x half> operands
; (was the incorrect .nxv16i8 integer mangling).
define <vscale x 8 x i1> @test_fcmpuo(<vscale x 8 x half> %a, <vscale x 8 x half> %b){
; CHECK-LABEL: define <vscale x 8 x i1> @test_fcmpuo(
; CHECK-SAME: <vscale x 8 x half> [[A:%.*]], <vscale x 8 x half> [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret <vscale x 8 x i1> zeroinitializer
;
entry:
%0 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.fcmpuo.nxv8f16(<vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 8 x i1> %0
}
| |