; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
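
; This file exercises the "_u" (undef) variants of the SVE shift
; intrinsics, where inactive lanes of the result are undefined. That
; freedom lets instruction selection pick the cheapest encoding: the
; merging predicated form when the shift amount is a vector, and the
; unpredicated immediate form when the amount is a splat constant.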

;
; ASR
;
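; Predicated arithmetic shift right by a vector of per-lane amounts.
; Because inactive lanes are undefined, the merging form can overwrite
; z0 in place without a movprfx or an explicit select.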

define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: asr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: asr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: asr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; ASR (immediate)
;
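; With a splat-constant shift amount the predicate can be dropped
; entirely and the unpredicated immediate encoding is used instead.
; For asr the legal immediate range is 1 to the element size in bits.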

define <vscale x 16 x i8> @asr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asr_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 3))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asr_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 4))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asr_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 5))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asr_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 6))
  ret <vscale x 2 x i64> %out
}

;
; LSL
;
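; Predicated logical shift left by a vector of per-lane amounts; as with
; asr, the undef variant maps straight onto the merging form.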

define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsl_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; LSL (immediate)
;
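; Unpredicated immediate form again. Note the lsl immediate range is
; 0 to the element size minus one (hence #7 in the i8 case below),
; unlike asr/lsr where it is 1 to the element size.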

define <vscale x 16 x i8> @lsl_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, z0.b, #7
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 7))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, z0.h, #8
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 8))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, z0.s, #9
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 9))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, z0.d, #10
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 10))
  ret <vscale x 2 x i64> %out
}

;
; LSR
;
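; Predicated logical shift right by a vector of per-lane amounts,
; zero-filling from the top; selection mirrors the asr and lsl cases.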

define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; LSR (immediate)
;
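; Unpredicated immediate form; #8 in the i8 case below is the maximum
; legal lsr immediate for .b elements.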

define <vscale x 16 x i8> @lsr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsr_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.b, z0.b, #8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 8))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsr_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.h, z0.h, #12
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 12))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsr_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.s, z0.s, #13
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 13))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsr_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.d, z0.d, #14
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 14))
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)