; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve %s -o - | FileCheck %s

; Test that the flag-setting forms of the SVE predicate bitwise logical
; instructions are selected, making the following ptest redundant so it is
; removed.
 |  | 
; The governing predicate of the ptest matches that of the logical op, so the
; flag-setting ANDS supplies NZCV directly; ptest.any folds to "cset ne".
define i1 @and(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Same pattern for BIC: the flag-setting BICS replaces BIC + PTEST.
define i1 @bic(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: bic:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bics p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Same pattern for EOR: the flag-setting EORS replaces EOR + PTEST.
define i1 @eor(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: eor:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Same pattern for NAND: the flag-setting NANDS replaces NAND + PTEST.
define i1 @nand(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: nand:
; CHECK:       // %bb.0:
; CHECK-NEXT:    nands p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.nand.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Same pattern for NOR: the flag-setting NORS replaces NOR + PTEST.
define i1 @nor(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: nor:
; CHECK:       // %bb.0:
; CHECK-NEXT:    nors p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.nor.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Same pattern for ORN: the flag-setting ORNS replaces ORN + PTEST.
define i1 @orn(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: orn:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orns p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.orn.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Same pattern for ORR: the flag-setting ORRS replaces ORR + PTEST.
define i1 @orr(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: orr:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orrs p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.orr.z.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  %2 = tail call i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %1)
  ret i1 %2
}
 |  | 
; Declarations of the SVE zeroing predicate logical intrinsics and the
; ptest.any intrinsic exercised by the tests above.
declare <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nand.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nor.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orn.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orr.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)