; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-NOZBB,CHECK-ZVKB32,CHECK-ZVKB-NOZBB32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-NOZBB,CHECK-ZVKB64,CHECK-ZVKB-NOZBB64
; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+zvkb,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-ZBB,CHECK-ZVKB32,CHECK-ZVKB-ZBB32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+zvkb,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-ZBB,CHECK-ZVKB64,CHECK-ZVKB-ZBB64
define <vscale x 1 x i8> @vandn_vv_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i8> %x, splat (i8 -1)
%b = and <vscale x 1 x i8> %a, %y
ret <vscale x 1 x i8> %b
}
define <vscale x 1 x i8> @vandn_vv_swapped_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i8> %x, splat (i8 -1)
%b = and <vscale x 1 x i8> %y, %a
ret <vscale x 1 x i8> %b
}
define <vscale x 1 x i8> @vandn_vx_nxv1i8(i8 %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 1 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i8> %splat, %y
ret <vscale x 1 x i8> %b
}
define <vscale x 1 x i8> @vandn_vx_swapped_nxv1i8(i8 %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 1 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i8> %splat, %y
ret <vscale x 1 x i8> %b
}
define <vscale x 2 x i8> @vandn_vv_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i8> %x, splat (i8 -1)
%b = and <vscale x 2 x i8> %a, %y
ret <vscale x 2 x i8> %b
}
define <vscale x 2 x i8> @vandn_vv_swapped_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i8> %x, splat (i8 -1)
%b = and <vscale x 2 x i8> %y, %a
ret <vscale x 2 x i8> %b
}
define <vscale x 2 x i8> @vandn_vx_nxv2i8(i8 %x, <vscale x 2 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 2 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i8> %splat, %y
ret <vscale x 2 x i8> %b
}
define <vscale x 2 x i8> @vandn_vx_swapped_nxv2i8(i8 %x, <vscale x 2 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 2 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i8> %splat, %y
ret <vscale x 2 x i8> %b
}
define <vscale x 4 x i8> @vandn_vv_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i8> %x, splat (i8 -1)
%b = and <vscale x 4 x i8> %a, %y
ret <vscale x 4 x i8> %b
}
define <vscale x 4 x i8> @vandn_vv_swapped_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i8> %x, splat (i8 -1)
%b = and <vscale x 4 x i8> %y, %a
ret <vscale x 4 x i8> %b
}
define <vscale x 4 x i8> @vandn_vx_nxv4i8(i8 %x, <vscale x 4 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 4 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i8> %splat, %y
ret <vscale x 4 x i8> %b
}
define <vscale x 4 x i8> @vandn_vx_swapped_nxv4i8(i8 %x, <vscale x 4 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 4 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i8> %splat, %y
ret <vscale x 4 x i8> %b
}
define <vscale x 8 x i8> @vandn_vv_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i8> %x, splat (i8 -1)
%b = and <vscale x 8 x i8> %a, %y
ret <vscale x 8 x i8> %b
}
define <vscale x 8 x i8> @vandn_vv_swapped_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i8> %x, splat (i8 -1)
%b = and <vscale x 8 x i8> %y, %a
ret <vscale x 8 x i8> %b
}
define <vscale x 8 x i8> @vandn_vx_nxv8i8(i8 %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 8 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i8> %splat, %y
ret <vscale x 8 x i8> %b
}
define <vscale x 8 x i8> @vandn_vx_swapped_nxv8i8(i8 %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 8 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i8> %splat, %y
ret <vscale x 8 x i8> %b
}
define <vscale x 16 x i8> @vandn_vv_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 16 x i8> %x, splat (i8 -1)
%b = and <vscale x 16 x i8> %a, %y
ret <vscale x 16 x i8> %b
}
define <vscale x 16 x i8> @vandn_vv_swapped_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v10, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 16 x i8> %x, splat (i8 -1)
%b = and <vscale x 16 x i8> %y, %a
ret <vscale x 16 x i8> %b
}
define <vscale x 16 x i8> @vandn_vx_nxv16i8(i8 %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 16 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%b = and <vscale x 16 x i8> %splat, %y
ret <vscale x 16 x i8> %b
}
define <vscale x 16 x i8> @vandn_vx_swapped_nxv16i8(i8 %x, <vscale x 16 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 16 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
%b = and <vscale x 16 x i8> %splat, %y
ret <vscale x 16 x i8> %b
}
define <vscale x 32 x i8> @vandn_vv_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 32 x i8> %x, splat (i8 -1)
%b = and <vscale x 32 x i8> %a, %y
ret <vscale x 32 x i8> %b
}
define <vscale x 32 x i8> @vandn_vv_swapped_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v12, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 32 x i8> %x, splat (i8 -1)
%b = and <vscale x 32 x i8> %y, %a
ret <vscale x 32 x i8> %b
}
define <vscale x 32 x i8> @vandn_vx_nxv32i8(i8 %x, <vscale x 32 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 32 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
%b = and <vscale x 32 x i8> %splat, %y
ret <vscale x 32 x i8> %b
}
define <vscale x 32 x i8> @vandn_vx_swapped_nxv32i8(i8 %x, <vscale x 32 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 32 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
%b = and <vscale x 32 x i8> %splat, %y
ret <vscale x 32 x i8> %b
}
define <vscale x 64 x i8> @vandn_vv_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v16
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 64 x i8> %x, splat (i8 -1)
%b = and <vscale x 64 x i8> %a, %y
ret <vscale x 64 x i8> %b
}
define <vscale x 64 x i8> @vandn_vv_swapped_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v16, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 64 x i8> %x, splat (i8 -1)
%b = and <vscale x 64 x i8> %y, %a
ret <vscale x 64 x i8> %b
}
define <vscale x 64 x i8> @vandn_vx_nxv64i8(i8 %x, <vscale x 64 x i8> %y) {
; CHECK-LABEL: vandn_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 64 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
%b = and <vscale x 64 x i8> %splat, %y
ret <vscale x 64 x i8> %b
}
define <vscale x 64 x i8> @vandn_vx_swapped_nxv64i8(i8 %x, <vscale x 64 x i8> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %x, -1
%head = insertelement <vscale x 64 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
%b = and <vscale x 64 x i8> %splat, %y
ret <vscale x 64 x i8> %b
}
define <vscale x 1 x i16> @vandn_vv_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: vandn_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i16> %x, splat (i16 -1)
%b = and <vscale x 1 x i16> %a, %y
ret <vscale x 1 x i16> %b
}
define <vscale x 1 x i16> @vandn_vv_swapped_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i16> %x, splat (i16 -1)
%b = and <vscale x 1 x i16> %y, %a
ret <vscale x 1 x i16> %b
}
define <vscale x 1 x i16> @vandn_vx_nxv1i16(i16 %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: vandn_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 1 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i16> %splat, %y
ret <vscale x 1 x i16> %b
}
define <vscale x 1 x i16> @vandn_vx_swapped_nxv1i16(i16 %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 1 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i16> %splat, %y
ret <vscale x 1 x i16> %b
}
define <vscale x 2 x i16> @vandn_vv_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
; CHECK-LABEL: vandn_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i16> %x, splat (i16 -1)
%b = and <vscale x 2 x i16> %a, %y
ret <vscale x 2 x i16> %b
}
define <vscale x 2 x i16> @vandn_vv_swapped_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i16> %x, splat (i16 -1)
%b = and <vscale x 2 x i16> %y, %a
ret <vscale x 2 x i16> %b
}
define <vscale x 2 x i16> @vandn_vx_nxv2i16(i16 %x, <vscale x 2 x i16> %y) {
; CHECK-LABEL: vandn_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i16> %splat, %y
ret <vscale x 2 x i16> %b
}
define <vscale x 2 x i16> @vandn_vx_swapped_nxv2i16(i16 %x, <vscale x 2 x i16> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i16> %splat, %y
ret <vscale x 2 x i16> %b
}
define <vscale x 4 x i16> @vandn_vv_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
; CHECK-LABEL: vandn_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i16> %x, splat (i16 -1)
%b = and <vscale x 4 x i16> %a, %y
ret <vscale x 4 x i16> %b
}
define <vscale x 4 x i16> @vandn_vv_swapped_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i16> %x, splat (i16 -1)
%b = and <vscale x 4 x i16> %y, %a
ret <vscale x 4 x i16> %b
}
define <vscale x 4 x i16> @vandn_vx_nxv4i16(i16 %x, <vscale x 4 x i16> %y) {
; CHECK-LABEL: vandn_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 4 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i16> %splat, %y
ret <vscale x 4 x i16> %b
}
define <vscale x 4 x i16> @vandn_vx_swapped_nxv4i16(i16 %x, <vscale x 4 x i16> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 4 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i16> %splat, %y
ret <vscale x 4 x i16> %b
}
define <vscale x 8 x i16> @vandn_vv_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
; CHECK-LABEL: vandn_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i16> %x, splat (i16 -1)
%b = and <vscale x 8 x i16> %a, %y
ret <vscale x 8 x i16> %b
}
define <vscale x 8 x i16> @vandn_vv_swapped_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v10, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i16> %x, splat (i16 -1)
%b = and <vscale x 8 x i16> %y, %a
ret <vscale x 8 x i16> %b
}
define <vscale x 8 x i16> @vandn_vx_nxv8i16(i16 %x, <vscale x 8 x i16> %y) {
; CHECK-LABEL: vandn_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 8 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i16> %splat, %y
ret <vscale x 8 x i16> %b
}
define <vscale x 8 x i16> @vandn_vx_swapped_nxv8i16(i16 %x, <vscale x 8 x i16> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 8 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i16> %splat, %y
ret <vscale x 8 x i16> %b
}
define <vscale x 16 x i16> @vandn_vv_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
; CHECK-LABEL: vandn_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 16 x i16> %x, splat (i16 -1)
%b = and <vscale x 16 x i16> %a, %y
ret <vscale x 16 x i16> %b
}
define <vscale x 16 x i16> @vandn_vv_swapped_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v12, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 16 x i16> %x, splat (i16 -1)
%b = and <vscale x 16 x i16> %y, %a
ret <vscale x 16 x i16> %b
}
define <vscale x 16 x i16> @vandn_vx_nxv16i16(i16 %x, <vscale x 16 x i16> %y) {
; CHECK-LABEL: vandn_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 16 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
%b = and <vscale x 16 x i16> %splat, %y
ret <vscale x 16 x i16> %b
}
define <vscale x 16 x i16> @vandn_vx_swapped_nxv16i16(i16 %x, <vscale x 16 x i16> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 16 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
%b = and <vscale x 16 x i16> %splat, %y
ret <vscale x 16 x i16> %b
}
define <vscale x 32 x i16> @vandn_vv_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
; CHECK-LABEL: vandn_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v16
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 32 x i16> %x, splat (i16 -1)
%b = and <vscale x 32 x i16> %a, %y
ret <vscale x 32 x i16> %b
}
define <vscale x 32 x i16> @vandn_vv_swapped_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v16, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 32 x i16> %x, splat (i16 -1)
%b = and <vscale x 32 x i16> %y, %a
ret <vscale x 32 x i16> %b
}
define <vscale x 32 x i16> @vandn_vx_nxv32i16(i16 %x, <vscale x 32 x i16> %y) {
; CHECK-LABEL: vandn_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 32 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
%b = and <vscale x 32 x i16> %splat, %y
ret <vscale x 32 x i16> %b
}
define <vscale x 32 x i16> @vandn_vx_swapped_nxv32i16(i16 %x, <vscale x 32 x i16> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i16 %x, -1
%head = insertelement <vscale x 32 x i16> poison, i16 %a, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
%b = and <vscale x 32 x i16> %splat, %y
ret <vscale x 32 x i16> %b
}
define <vscale x 1 x i32> @vandn_vv_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
; CHECK-LABEL: vandn_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i32> %x, splat (i32 -1)
%b = and <vscale x 1 x i32> %a, %y
ret <vscale x 1 x i32> %b
}
define <vscale x 1 x i32> @vandn_vv_swapped_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i32> %x, splat (i32 -1)
%b = and <vscale x 1 x i32> %y, %a
ret <vscale x 1 x i32> %b
}
define <vscale x 1 x i32> @vandn_vx_nxv1i32(i32 %x, <vscale x 1 x i32> %y) {
; CHECK-LABEL: vandn_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 1 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i32> %splat, %y
ret <vscale x 1 x i32> %b
}
define <vscale x 1 x i32> @vandn_vx_swapped_nxv1i32(i32 %x, <vscale x 1 x i32> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 1 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i32> %splat, %y
ret <vscale x 1 x i32> %b
}
define <vscale x 2 x i32> @vandn_vv_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: vandn_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i32> %x, splat (i32 -1)
%b = and <vscale x 2 x i32> %a, %y
ret <vscale x 2 x i32> %b
}
define <vscale x 2 x i32> @vandn_vv_swapped_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i32> %x, splat (i32 -1)
%b = and <vscale x 2 x i32> %y, %a
ret <vscale x 2 x i32> %b
}
define <vscale x 2 x i32> @vandn_vx_nxv2i32(i32 %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: vandn_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 2 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i32> %splat, %y
ret <vscale x 2 x i32> %b
}
define <vscale x 2 x i32> @vandn_vx_swapped_nxv2i32(i32 %x, <vscale x 2 x i32> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 2 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i32> %splat, %y
ret <vscale x 2 x i32> %b
}
define <vscale x 4 x i32> @vandn_vv_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
; CHECK-LABEL: vandn_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i32> %x, splat (i32 -1)
%b = and <vscale x 4 x i32> %a, %y
ret <vscale x 4 x i32> %b
}
define <vscale x 4 x i32> @vandn_vv_swapped_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v10, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i32> %x, splat (i32 -1)
%b = and <vscale x 4 x i32> %y, %a
ret <vscale x 4 x i32> %b
}
define <vscale x 4 x i32> @vandn_vx_nxv4i32(i32 %x, <vscale x 4 x i32> %y) {
; CHECK-LABEL: vandn_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i32> %splat, %y
ret <vscale x 4 x i32> %b
}
define <vscale x 4 x i32> @vandn_vx_swapped_nxv4i32(i32 %x, <vscale x 4 x i32> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i32> %splat, %y
ret <vscale x 4 x i32> %b
}
define <vscale x 8 x i32> @vandn_vv_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: vandn_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i32> %x, splat (i32 -1)
%b = and <vscale x 8 x i32> %a, %y
ret <vscale x 8 x i32> %b
}
define <vscale x 8 x i32> @vandn_vv_swapped_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v12, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i32> %x, splat (i32 -1)
%b = and <vscale x 8 x i32> %y, %a
ret <vscale x 8 x i32> %b
}
define <vscale x 8 x i32> @vandn_vx_nxv8i32(i32 %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: vandn_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 8 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i32> %splat, %y
ret <vscale x 8 x i32> %b
}
define <vscale x 8 x i32> @vandn_vx_swapped_nxv8i32(i32 %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 8 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i32> %splat, %y
ret <vscale x 8 x i32> %b
}
define <vscale x 16 x i32> @vandn_vv_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
; CHECK-LABEL: vandn_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v16
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 16 x i32> %x, splat (i32 -1)
%b = and <vscale x 16 x i32> %a, %y
ret <vscale x 16 x i32> %b
}
define <vscale x 16 x i32> @vandn_vv_swapped_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v16, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 16 x i32> %x, splat (i32 -1)
%b = and <vscale x 16 x i32> %y, %a
ret <vscale x 16 x i32> %b
}
define <vscale x 16 x i32> @vandn_vx_nxv16i32(i32 %x, <vscale x 16 x i32> %y) {
; CHECK-LABEL: vandn_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 16 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%b = and <vscale x 16 x i32> %splat, %y
ret <vscale x 16 x i32> %b
}
define <vscale x 16 x i32> @vandn_vx_swapped_nxv16i32(i32 %x, <vscale x 16 x i32> %y) {
; CHECK-LABEL: vandn_vx_swapped_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = xor i32 %x, -1
%head = insertelement <vscale x 16 x i32> poison, i32 %a, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%b = and <vscale x 16 x i32> %splat, %y
ret <vscale x 16 x i32> %b
}
define <vscale x 1 x i64> @vandn_vv_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
; CHECK-LABEL: vandn_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv1i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i64> %x, splat (i64 -1)
%b = and <vscale x 1 x i64> %a, %y
ret <vscale x 1 x i64> %b
}
define <vscale x 1 x i64> @vandn_vv_swapped_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv1i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i64> %x, splat (i64 -1)
%b = and <vscale x 1 x i64> %y, %a
ret <vscale x 1 x i64> %b
}
define <vscale x 1 x i64> @vandn_vx_nxv1i64(i64 %x, <vscale x 1 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_nxv1i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v9, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_nxv1i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_nxv1i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v9, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_nxv1i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 1 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i64> %splat, %y
ret <vscale x 1 x i64> %b
}
define <vscale x 1 x i64> @vandn_vx_swapped_nxv1i64(i64 %x, <vscale x 1 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_swapped_nxv1i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v9, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_swapped_nxv1i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_swapped_nxv1i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v9, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv1i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 1 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%b = and <vscale x 1 x i64> %splat, %y
ret <vscale x 1 x i64> %b
}
define <vscale x 2 x i64> @vandn_vv_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
; CHECK-LABEL: vandn_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv2i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i64> %x, splat (i64 -1)
%b = and <vscale x 2 x i64> %a, %y
ret <vscale x 2 x i64> %b
}
define <vscale x 2 x i64> @vandn_vv_swapped_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v10, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv2i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 2 x i64> %x, splat (i64 -1)
%b = and <vscale x 2 x i64> %y, %a
ret <vscale x 2 x i64> %b
}
define <vscale x 2 x i64> @vandn_vx_nxv2i64(i64 %x, <vscale x 2 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_nxv2i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v10, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_nxv2i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_nxv2i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v10, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v10, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_nxv2i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 2 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i64> %splat, %y
ret <vscale x 2 x i64> %b
}
define <vscale x 2 x i64> @vandn_vx_swapped_nxv2i64(i64 %x, <vscale x 2 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_swapped_nxv2i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v10, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_swapped_nxv2i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_swapped_nxv2i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v10, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v10, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv2i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 2 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%b = and <vscale x 2 x i64> %splat, %y
ret <vscale x 2 x i64> %b
}
define <vscale x 4 x i64> @vandn_vv_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
; CHECK-LABEL: vandn_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv4i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i64> %x, splat (i64 -1)
%b = and <vscale x 4 x i64> %a, %y
ret <vscale x 4 x i64> %b
}
define <vscale x 4 x i64> @vandn_vv_swapped_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v12, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv4i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 4 x i64> %x, splat (i64 -1)
%b = and <vscale x 4 x i64> %y, %a
ret <vscale x 4 x i64> %b
}
define <vscale x 4 x i64> @vandn_vx_nxv4i64(i64 %x, <vscale x 4 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_nxv4i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v12, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_nxv4i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_nxv4i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v12, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v12, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_nxv4i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 4 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i64> %splat, %y
ret <vscale x 4 x i64> %b
}
define <vscale x 4 x i64> @vandn_vx_swapped_nxv4i64(i64 %x, <vscale x 4 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_swapped_nxv4i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v12, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_swapped_nxv4i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_swapped_nxv4i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v12, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v12, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv4i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 4 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%b = and <vscale x 4 x i64> %splat, %y
ret <vscale x 4 x i64> %b
}
define <vscale x 8 x i64> @vandn_vv_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vandn_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v8, v16
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_nxv8i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i64> %x, splat (i64 -1)
%b = and <vscale x 8 x i64> %a, %y
ret <vscale x 8 x i64> %b
}
define <vscale x 8 x i64> @vandn_vv_swapped_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vandn_vv_swapped_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: vand.vv v8, v16, v8
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_swapped_nxv8i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 8 x i64> %x, splat (i64 -1)
%b = and <vscale x 8 x i64> %y, %a
ret <vscale x 8 x i64> %b
}
define <vscale x 8 x i64> @vandn_vx_nxv8i64(i64 %x, <vscale x 8 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_nxv8i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v16, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_nxv8i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_nxv8i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v16, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v16, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_nxv8i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i64> %splat, %y
ret <vscale x 8 x i64> %b
}
define <vscale x 8 x i64> @vandn_vx_swapped_nxv8i64(i64 %x, <vscale x 8 x i64> %y) {
; CHECK-RV32-LABEL: vandn_vx_swapped_nxv8i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v16, v8
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_swapped_nxv8i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_swapped_nxv8i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v16, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v16, v8
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_swapped_nxv8i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = xor i64 %x, -1
%head = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%b = and <vscale x 8 x i64> %splat, %y
ret <vscale x 8 x i64> %b
}
define <vscale x 1 x i16> @vandn_vx_imm16(<vscale x 1 x i16> %x) {
; CHECK-LABEL: vandn_vx_imm16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_imm16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: lui a0, 1048568
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = and <vscale x 1 x i16> splat (i16 32767), %x
ret <vscale x 1 x i16> %a
}
define <vscale x 1 x i16> @vandn_vx_swapped_imm16(<vscale x 1 x i16> %x) {
; CHECK-LABEL: vandn_vx_swapped_imm16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_swapped_imm16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: lui a0, 1048568
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = and <vscale x 1 x i16> %x, splat (i16 32767)
ret <vscale x 1 x i16> %a
}
define <vscale x 1 x i64> @vandn_vx_imm64(<vscale x 1 x i64> %x) {
; CHECK-RV32-LABEL: vandn_vx_imm64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: lui a0, 1044480
; CHECK-RV32-NEXT: li a1, 255
; CHECK-RV32-NEXT: sw a1, 8(sp)
; CHECK-RV32-NEXT: sw a0, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v8, v9
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_imm64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: li a0, -1
; CHECK-RV64-NEXT: slli a0, a0, 56
; CHECK-RV64-NEXT: addi a0, a0, 255
; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_imm64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: lui a0, 1044480
; CHECK-ZVKB32-NEXT: li a1, 255
; CHECK-ZVKB32-NEXT: sw a1, 8(sp)
; CHECK-ZVKB32-NEXT: sw a0, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v9
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_imm64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: lui a0, 1048560
; CHECK-ZVKB64-NEXT: srli a0, a0, 8
; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB64-NEXT: ret
%a = and <vscale x 1 x i64> %x, splat (i64 -72057594037927681)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i16> @vandn_vx_multi_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
; CHECK-LABEL: vandn_vx_multi_imm16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 4
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: vand.vx v9, v9, a0
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_multi_imm16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: lui a0, 1048572
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: vandn.vx v9, v9, a0
; CHECK-ZVKB-NEXT: vadd.vv v8, v8, v9
; CHECK-ZVKB-NEXT: ret
%a = and <vscale x 1 x i16> %x, splat (i16 16383)
%b = and <vscale x 1 x i16> %y, splat (i16 16383)
%c = add <vscale x 1 x i16> %a, %b
ret <vscale x 1 x i16> %c
}
define <vscale x 1 x i16> @vandn_vx_multi_scalar_imm16(<vscale x 1 x i16> %x, i16 %y) {
; CHECK-LABEL: vandn_vx_multi_scalar_imm16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, 8
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a1
; CHECK-NEXT: or a0, a0, a1
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-NOZBB-LABEL: vandn_vx_multi_scalar_imm16:
; CHECK-ZVKB-NOZBB: # %bb.0:
; CHECK-ZVKB-NOZBB-NEXT: lui a1, 8
; CHECK-ZVKB-NOZBB-NEXT: addi a1, a1, -1
; CHECK-ZVKB-NOZBB-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NOZBB-NEXT: vand.vx v8, v8, a1
; CHECK-ZVKB-NOZBB-NEXT: or a0, a0, a1
; CHECK-ZVKB-NOZBB-NEXT: vadd.vx v8, v8, a0
; CHECK-ZVKB-NOZBB-NEXT: ret
;
; CHECK-ZVKB-ZBB-LABEL: vandn_vx_multi_scalar_imm16:
; CHECK-ZVKB-ZBB: # %bb.0:
; CHECK-ZVKB-ZBB-NEXT: lui a1, 1048568
; CHECK-ZVKB-ZBB-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
; CHECK-ZVKB-ZBB-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-ZBB-NEXT: orn a0, a0, a1
; CHECK-ZVKB-ZBB-NEXT: vadd.vx v8, v8, a0
; CHECK-ZVKB-ZBB-NEXT: ret
%a = and <vscale x 1 x i16> %x, splat (i16 32767)
%b = or i16 %y, 32767
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
%c = add <vscale x 1 x i16> %a, %splat
ret <vscale x 1 x i16> %c
}
define <vscale x 1 x i16> @vand_vadd_vx_imm16(<vscale x 1 x i16> %x) {
; CHECK-LABEL: vand_vadd_vx_imm16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vand_vadd_vx_imm16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: lui a0, 8
; CHECK-ZVKB-NEXT: addi a0, a0, -1
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vand.vx v8, v8, a0
; CHECK-ZVKB-NEXT: vadd.vx v8, v8, a0
; CHECK-ZVKB-NEXT: ret
%a = and <vscale x 1 x i16> %x, splat (i16 32767)
%b = add <vscale x 1 x i16> %a, splat (i16 32767)
ret <vscale x 1 x i16> %b
}
define <vscale x 1 x i8> @vand_vx_hoisted_not(<vscale x 1 x i8> %x, i8 %m, i1 zeroext %cond) {
; CHECK-LABEL: vand_vx_hoisted_not:
; CHECK: # %bb.0:
; CHECK-NEXT: beqz a1, .LBB94_2
; CHECK-NEXT: # %bb.1: # %mask
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: .LBB94_2: # %identity
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vand_vx_hoisted_not:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: beqz a1, .LBB94_2
; CHECK-ZVKB-NEXT: # %bb.1: # %mask
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: .LBB94_2: # %identity
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %m, -1
%head = insertelement <vscale x 1 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
br i1 %cond, label %mask, label %identity
mask:
%masked = and <vscale x 1 x i8> %splat, %x
ret <vscale x 1 x i8> %masked
identity:
ret <vscale x 1 x i8> %x
}
define <vscale x 1 x i8> @vand_vx_hoisted_not_swapped(<vscale x 1 x i8> %x, i8 %m, i1 zeroext %cond) {
; CHECK-LABEL: vand_vx_hoisted_not_swapped:
; CHECK: # %bb.0:
; CHECK-NEXT: beqz a1, .LBB95_2
; CHECK-NEXT: # %bb.1: # %mask
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: .LBB95_2: # %identity
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vand_vx_hoisted_not_swapped:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: beqz a1, .LBB95_2
; CHECK-ZVKB-NEXT: # %bb.1: # %mask
; CHECK-ZVKB-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0
; CHECK-ZVKB-NEXT: .LBB95_2: # %identity
; CHECK-ZVKB-NEXT: ret
%a = xor i8 %m, -1
%head = insertelement <vscale x 1 x i8> poison, i8 %a, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
br i1 %cond, label %mask, label %identity
mask:
%masked = and <vscale x 1 x i8> %x, %splat
ret <vscale x 1 x i8> %masked
identity:
ret <vscale x 1 x i8> %x
}
define <vscale x 1 x i8> @vand_vv_hoisted_not(<vscale x 1 x i8> %x, <vscale x 1 x i8> %m, i1 zeroext %cond) {
; CHECK-LABEL: vand_vv_hoisted_not:
; CHECK: # %bb.0:
; CHECK-NEXT: beqz a0, .LBB96_2
; CHECK-NEXT: # %bb.1: # %mask
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v9, v9
; CHECK-NEXT: vand.vv v8, v9, v8
; CHECK-NEXT: .LBB96_2: # %identity
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vand_vv_hoisted_not:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: beqz a0, .LBB96_2
; CHECK-ZVKB-NEXT: # %bb.1: # %mask
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v8, v9
; CHECK-ZVKB-NEXT: .LBB96_2: # %identity
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i8> %m, splat (i8 -1)
br i1 %cond, label %mask, label %identity
mask:
%masked = and <vscale x 1 x i8> %a, %x
ret <vscale x 1 x i8> %masked
identity:
ret <vscale x 1 x i8> %x
}
define <vscale x 1 x i8> @vand_vv_hoisted_not_swapped(<vscale x 1 x i8> %x, <vscale x 1 x i8> %m, i1 zeroext %cond) {
; CHECK-LABEL: vand_vv_hoisted_not_swapped:
; CHECK: # %bb.0:
; CHECK-NEXT: beqz a0, .LBB97_2
; CHECK-NEXT: # %bb.1: # %mask
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v9, v9
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: .LBB97_2: # %identity
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vand_vv_hoisted_not_swapped:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: beqz a0, .LBB97_2
; CHECK-ZVKB-NEXT: # %bb.1: # %mask
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v8, v9
; CHECK-ZVKB-NEXT: .LBB97_2: # %identity
; CHECK-ZVKB-NEXT: ret
%a = xor <vscale x 1 x i8> %m, splat (i8 -1)
br i1 %cond, label %mask, label %identity
mask:
%masked = and <vscale x 1 x i8> %x, %a
ret <vscale x 1 x i8> %masked
identity:
ret <vscale x 1 x i8> %x
}
declare i64 @llvm.vscale.i64()
define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-RV32: # %bb.0: # %entry
; CHECK-RV32-NEXT: csrr a4, vlenb
; CHECK-RV32-NEXT: srli a2, a4, 3
; CHECK-RV32-NEXT: li a3, 64
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: li a3, 0
; CHECK-RV32-NEXT: li a2, 0
; CHECK-RV32-NEXT: j .LBB98_5
; CHECK-RV32-NEXT: .LBB98_2: # %vector.ph
; CHECK-RV32-NEXT: li a2, 0
; CHECK-RV32-NEXT: srli a4, a4, 1
; CHECK-RV32-NEXT: neg a3, a4
; CHECK-RV32-NEXT: andi a3, a3, 256
; CHECK-RV32-NEXT: li a6, 0
; CHECK-RV32-NEXT: li a5, 0
; CHECK-RV32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-RV32-NEXT: .LBB98_3: # %vector.body
; CHECK-RV32-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-RV32-NEXT: slli a7, a6, 2
; CHECK-RV32-NEXT: add t0, a6, a4
; CHECK-RV32-NEXT: add a7, a0, a7
; CHECK-RV32-NEXT: vl2re32.v v8, (a7)
; CHECK-RV32-NEXT: sltu a6, t0, a6
; CHECK-RV32-NEXT: add a5, a5, a6
; CHECK-RV32-NEXT: xor a6, t0, a3
; CHECK-RV32-NEXT: vand.vx v8, v8, a1
; CHECK-RV32-NEXT: or t1, a6, a5
; CHECK-RV32-NEXT: vs2r.v v8, (a7)
; CHECK-RV32-NEXT: mv a6, t0
; CHECK-RV32-NEXT: bnez t1, .LBB98_3
; CHECK-RV32-NEXT: # %bb.4: # %middle.block
; CHECK-RV32-NEXT: bnez a3, .LBB98_6
; CHECK-RV32-NEXT: .LBB98_5: # %for.body
; CHECK-RV32-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-RV32-NEXT: slli a4, a3, 2
; CHECK-RV32-NEXT: addi a3, a3, 1
; CHECK-RV32-NEXT: add a4, a0, a4
; CHECK-RV32-NEXT: lw a5, 0(a4)
; CHECK-RV32-NEXT: seqz a6, a3
; CHECK-RV32-NEXT: add a2, a2, a6
; CHECK-RV32-NEXT: xori a6, a3, 256
; CHECK-RV32-NEXT: and a5, a5, a1
; CHECK-RV32-NEXT: or a6, a6, a2
; CHECK-RV32-NEXT: sw a5, 0(a4)
; CHECK-RV32-NEXT: bnez a6, .LBB98_5
; CHECK-RV32-NEXT: .LBB98_6: # %for.cond.cleanup
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vand_vx_loop_hoisted_not:
; CHECK-RV64: # %bb.0: # %entry
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: srli a2, a4, 3
; CHECK-RV64-NEXT: li a3, 64
; CHECK-RV64-NEXT: not a1, a1
; CHECK-RV64-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: li a2, 0
; CHECK-RV64-NEXT: j .LBB98_5
; CHECK-RV64-NEXT: .LBB98_2: # %vector.ph
; CHECK-RV64-NEXT: srli a3, a4, 1
; CHECK-RV64-NEXT: neg a2, a3
; CHECK-RV64-NEXT: andi a2, a2, 256
; CHECK-RV64-NEXT: slli a4, a4, 1
; CHECK-RV64-NEXT: mv a5, a0
; CHECK-RV64-NEXT: mv a6, a2
; CHECK-RV64-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-RV64-NEXT: .LBB98_3: # %vector.body
; CHECK-RV64-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-RV64-NEXT: vl2re32.v v8, (a5)
; CHECK-RV64-NEXT: sub a6, a6, a3
; CHECK-RV64-NEXT: vand.vx v8, v8, a1
; CHECK-RV64-NEXT: vs2r.v v8, (a5)
; CHECK-RV64-NEXT: add a5, a5, a4
; CHECK-RV64-NEXT: bnez a6, .LBB98_3
; CHECK-RV64-NEXT: # %bb.4: # %middle.block
; CHECK-RV64-NEXT: bnez a2, .LBB98_7
; CHECK-RV64-NEXT: .LBB98_5: # %for.body.preheader
; CHECK-RV64-NEXT: slli a2, a2, 2
; CHECK-RV64-NEXT: add a2, a0, a2
; CHECK-RV64-NEXT: addi a0, a0, 1024
; CHECK-RV64-NEXT: .LBB98_6: # %for.body
; CHECK-RV64-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-RV64-NEXT: lw a3, 0(a2)
; CHECK-RV64-NEXT: and a3, a3, a1
; CHECK-RV64-NEXT: sw a3, 0(a2)
; CHECK-RV64-NEXT: addi a2, a2, 4
; CHECK-RV64-NEXT: bne a2, a0, .LBB98_6
; CHECK-RV64-NEXT: .LBB98_7: # %for.cond.cleanup
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB-NOZBB32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-NOZBB32: # %bb.0: # %entry
; CHECK-ZVKB-NOZBB32-NEXT: csrr a4, vlenb
; CHECK-ZVKB-NOZBB32-NEXT: srli a2, a4, 3
; CHECK-ZVKB-NOZBB32-NEXT: li a3, 64
; CHECK-ZVKB-NOZBB32-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.1:
; CHECK-ZVKB-NOZBB32-NEXT: li a3, 0
; CHECK-ZVKB-NOZBB32-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB32-NEXT: j .LBB98_5
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-NOZBB32-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB32-NEXT: srli a4, a4, 1
; CHECK-ZVKB-NOZBB32-NEXT: neg a3, a4
; CHECK-ZVKB-NOZBB32-NEXT: andi a3, a3, 256
; CHECK-ZVKB-NOZBB32-NEXT: li a6, 0
; CHECK-ZVKB-NOZBB32-NEXT: li a5, 0
; CHECK-ZVKB-NOZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-NOZBB32-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-NOZBB32-NEXT: slli a7, a6, 2
; CHECK-ZVKB-NOZBB32-NEXT: add t0, a6, a4
; CHECK-ZVKB-NOZBB32-NEXT: add a7, a0, a7
; CHECK-ZVKB-NOZBB32-NEXT: vl2re32.v v8, (a7)
; CHECK-ZVKB-NOZBB32-NEXT: sltu a6, t0, a6
; CHECK-ZVKB-NOZBB32-NEXT: add a5, a5, a6
; CHECK-ZVKB-NOZBB32-NEXT: xor a6, t0, a3
; CHECK-ZVKB-NOZBB32-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-NOZBB32-NEXT: or t1, a6, a5
; CHECK-ZVKB-NOZBB32-NEXT: vs2r.v v8, (a7)
; CHECK-ZVKB-NOZBB32-NEXT: mv a6, t0
; CHECK-ZVKB-NOZBB32-NEXT: bnez t1, .LBB98_3
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-NOZBB32-NEXT: bnez a3, .LBB98_7
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_5: # %for.body.preheader
; CHECK-ZVKB-NOZBB32-NEXT: not a1, a1
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_6: # %for.body
; CHECK-ZVKB-NOZBB32-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-NOZBB32-NEXT: slli a4, a3, 2
; CHECK-ZVKB-NOZBB32-NEXT: addi a3, a3, 1
; CHECK-ZVKB-NOZBB32-NEXT: add a4, a0, a4
; CHECK-ZVKB-NOZBB32-NEXT: lw a5, 0(a4)
; CHECK-ZVKB-NOZBB32-NEXT: seqz a6, a3
; CHECK-ZVKB-NOZBB32-NEXT: add a2, a2, a6
; CHECK-ZVKB-NOZBB32-NEXT: xori a6, a3, 256
; CHECK-ZVKB-NOZBB32-NEXT: and a5, a5, a1
; CHECK-ZVKB-NOZBB32-NEXT: or a6, a6, a2
; CHECK-ZVKB-NOZBB32-NEXT: sw a5, 0(a4)
; CHECK-ZVKB-NOZBB32-NEXT: bnez a6, .LBB98_6
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_7: # %for.cond.cleanup
; CHECK-ZVKB-NOZBB32-NEXT: ret
;
; CHECK-ZVKB-NOZBB64-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-NOZBB64: # %bb.0: # %entry
; CHECK-ZVKB-NOZBB64-NEXT: csrr a4, vlenb
; CHECK-ZVKB-NOZBB64-NEXT: srli a2, a4, 3
; CHECK-ZVKB-NOZBB64-NEXT: li a3, 64
; CHECK-ZVKB-NOZBB64-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-ZVKB-NOZBB64-NEXT: # %bb.1:
; CHECK-ZVKB-NOZBB64-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB64-NEXT: j .LBB98_5
; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-NOZBB64-NEXT: srli a3, a4, 1
; CHECK-ZVKB-NOZBB64-NEXT: neg a2, a3
; CHECK-ZVKB-NOZBB64-NEXT: andi a2, a2, 256
; CHECK-ZVKB-NOZBB64-NEXT: slli a4, a4, 1
; CHECK-ZVKB-NOZBB64-NEXT: mv a5, a0
; CHECK-ZVKB-NOZBB64-NEXT: mv a6, a2
; CHECK-ZVKB-NOZBB64-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-NOZBB64-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-NOZBB64-NEXT: vl2re32.v v8, (a5)
; CHECK-ZVKB-NOZBB64-NEXT: sub a6, a6, a3
; CHECK-ZVKB-NOZBB64-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-NOZBB64-NEXT: vs2r.v v8, (a5)
; CHECK-ZVKB-NOZBB64-NEXT: add a5, a5, a4
; CHECK-ZVKB-NOZBB64-NEXT: bnez a6, .LBB98_3
; CHECK-ZVKB-NOZBB64-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-NOZBB64-NEXT: bnez a2, .LBB98_7
; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_5: # %for.body.preheader
; CHECK-ZVKB-NOZBB64-NEXT: not a1, a1
; CHECK-ZVKB-NOZBB64-NEXT: slli a2, a2, 2
; CHECK-ZVKB-NOZBB64-NEXT: add a2, a0, a2
; CHECK-ZVKB-NOZBB64-NEXT: addi a0, a0, 1024
; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_6: # %for.body
; CHECK-ZVKB-NOZBB64-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-NOZBB64-NEXT: lw a3, 0(a2)
; CHECK-ZVKB-NOZBB64-NEXT: and a3, a3, a1
; CHECK-ZVKB-NOZBB64-NEXT: sw a3, 0(a2)
; CHECK-ZVKB-NOZBB64-NEXT: addi a2, a2, 4
; CHECK-ZVKB-NOZBB64-NEXT: bne a2, a0, .LBB98_6
; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_7: # %for.cond.cleanup
; CHECK-ZVKB-NOZBB64-NEXT: ret
;
; CHECK-ZVKB-ZBB32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-ZBB32: # %bb.0: # %entry
; CHECK-ZVKB-ZBB32-NEXT: csrr a4, vlenb
; CHECK-ZVKB-ZBB32-NEXT: srli a2, a4, 3
; CHECK-ZVKB-ZBB32-NEXT: li a3, 64
; CHECK-ZVKB-ZBB32-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-ZVKB-ZBB32-NEXT: # %bb.1:
; CHECK-ZVKB-ZBB32-NEXT: li a3, 0
; CHECK-ZVKB-ZBB32-NEXT: li a2, 0
; CHECK-ZVKB-ZBB32-NEXT: j .LBB98_5
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-ZBB32-NEXT: li a2, 0
; CHECK-ZVKB-ZBB32-NEXT: srli a4, a4, 1
; CHECK-ZVKB-ZBB32-NEXT: neg a3, a4
; CHECK-ZVKB-ZBB32-NEXT: andi a3, a3, 256
; CHECK-ZVKB-ZBB32-NEXT: li a6, 0
; CHECK-ZVKB-ZBB32-NEXT: li a5, 0
; CHECK-ZVKB-ZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-ZBB32-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-ZBB32-NEXT: slli a7, a6, 2
; CHECK-ZVKB-ZBB32-NEXT: add t0, a6, a4
; CHECK-ZVKB-ZBB32-NEXT: add a7, a0, a7
; CHECK-ZVKB-ZBB32-NEXT: vl2re32.v v8, (a7)
; CHECK-ZVKB-ZBB32-NEXT: sltu a6, t0, a6
; CHECK-ZVKB-ZBB32-NEXT: add a5, a5, a6
; CHECK-ZVKB-ZBB32-NEXT: xor a6, t0, a3
; CHECK-ZVKB-ZBB32-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-ZBB32-NEXT: or t1, a6, a5
; CHECK-ZVKB-ZBB32-NEXT: vs2r.v v8, (a7)
; CHECK-ZVKB-ZBB32-NEXT: mv a6, t0
; CHECK-ZVKB-ZBB32-NEXT: bnez t1, .LBB98_3
; CHECK-ZVKB-ZBB32-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-ZBB32-NEXT: bnez a3, .LBB98_6
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_5: # %for.body
; CHECK-ZVKB-ZBB32-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-ZBB32-NEXT: slli a4, a3, 2
; CHECK-ZVKB-ZBB32-NEXT: addi a3, a3, 1
; CHECK-ZVKB-ZBB32-NEXT: add a4, a0, a4
; CHECK-ZVKB-ZBB32-NEXT: lw a5, 0(a4)
; CHECK-ZVKB-ZBB32-NEXT: seqz a6, a3
; CHECK-ZVKB-ZBB32-NEXT: add a2, a2, a6
; CHECK-ZVKB-ZBB32-NEXT: xori a6, a3, 256
; CHECK-ZVKB-ZBB32-NEXT: andn a5, a5, a1
; CHECK-ZVKB-ZBB32-NEXT: or a6, a6, a2
; CHECK-ZVKB-ZBB32-NEXT: sw a5, 0(a4)
; CHECK-ZVKB-ZBB32-NEXT: bnez a6, .LBB98_5
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_6: # %for.cond.cleanup
; CHECK-ZVKB-ZBB32-NEXT: ret
;
; CHECK-ZVKB-ZBB64-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-ZBB64: # %bb.0: # %entry
; CHECK-ZVKB-ZBB64-NEXT: csrr a4, vlenb
; CHECK-ZVKB-ZBB64-NEXT: srli a2, a4, 3
; CHECK-ZVKB-ZBB64-NEXT: li a3, 64
; CHECK-ZVKB-ZBB64-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-ZVKB-ZBB64-NEXT: # %bb.1:
; CHECK-ZVKB-ZBB64-NEXT: li a2, 0
; CHECK-ZVKB-ZBB64-NEXT: j .LBB98_5
; CHECK-ZVKB-ZBB64-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-ZBB64-NEXT: srli a3, a4, 1
; CHECK-ZVKB-ZBB64-NEXT: neg a2, a3
; CHECK-ZVKB-ZBB64-NEXT: andi a2, a2, 256
; CHECK-ZVKB-ZBB64-NEXT: slli a4, a4, 1
; CHECK-ZVKB-ZBB64-NEXT: mv a5, a0
; CHECK-ZVKB-ZBB64-NEXT: mv a6, a2
; CHECK-ZVKB-ZBB64-NEXT: vsetvli a7, zero, e32, m2, ta, ma
; CHECK-ZVKB-ZBB64-NEXT: .LBB98_3: # %vector.body
; CHECK-ZVKB-ZBB64-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-ZBB64-NEXT: vl2re32.v v8, (a5)
; CHECK-ZVKB-ZBB64-NEXT: sub a6, a6, a3
; CHECK-ZVKB-ZBB64-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-ZBB64-NEXT: vs2r.v v8, (a5)
; CHECK-ZVKB-ZBB64-NEXT: add a5, a5, a4
; CHECK-ZVKB-ZBB64-NEXT: bnez a6, .LBB98_3
; CHECK-ZVKB-ZBB64-NEXT: # %bb.4: # %middle.block
; CHECK-ZVKB-ZBB64-NEXT: bnez a2, .LBB98_7
; CHECK-ZVKB-ZBB64-NEXT: .LBB98_5: # %for.body.preheader
; CHECK-ZVKB-ZBB64-NEXT: slli a2, a2, 2
; CHECK-ZVKB-ZBB64-NEXT: add a2, a0, a2
; CHECK-ZVKB-ZBB64-NEXT: addi a0, a0, 1024
; CHECK-ZVKB-ZBB64-NEXT: .LBB98_6: # %for.body
; CHECK-ZVKB-ZBB64-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-ZVKB-ZBB64-NEXT: lw a3, 0(a2)
; CHECK-ZVKB-ZBB64-NEXT: andn a3, a3, a1
; CHECK-ZVKB-ZBB64-NEXT: sw a3, 0(a2)
; CHECK-ZVKB-ZBB64-NEXT: addi a2, a2, 4
; CHECK-ZVKB-ZBB64-NEXT: bne a2, a0, .LBB98_6
; CHECK-ZVKB-ZBB64-NEXT: .LBB98_7: # %for.cond.cleanup
; CHECK-ZVKB-ZBB64-NEXT: ret
entry:
%not = xor i32 %mask, -1
%vscale = tail call i64 @llvm.vscale.i64()
%min.iters.check = icmp samesign ugt i64 %vscale, 64
br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
vector.ph:
%1 = tail call i64 @llvm.vscale.i64()
%.neg = mul nuw nsw i64 %1, 508
%n.vec = and i64 %.neg, 256
%2 = tail call i64 @llvm.vscale.i64()
%3 = shl nuw nsw i64 %2, 2
%broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %not, i64 0
%broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
br label %vector.body
vector.body:
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%4 = getelementptr inbounds nuw i32, ptr %a, i64 %index
%wide.load = load <vscale x 4 x i32>, ptr %4, align 4
%5 = and <vscale x 4 x i32> %wide.load, %broadcast.splat
store <vscale x 4 x i32> %5, ptr %4, align 4
%index.next = add nuw i64 %index, %3
%6 = icmp eq i64 %index.next, %n.vec
br i1 %6, label %middle.block, label %vector.body
middle.block:
%cmp.n.not = icmp eq i64 %n.vec, 0
br i1 %cmp.n.not, label %for.body.preheader, label %for.cond.cleanup
for.body.preheader:
%indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
br label %for.body
for.cond.cleanup:
ret void
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
%arrayidx = getelementptr inbounds nuw i32, ptr %a, i64 %indvars.iv
%7 = load i32, ptr %arrayidx, align 4
%and = and i32 %7, %not
store i32 %and, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond.not = icmp eq i64 %indvars.iv.next, 256
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
define <vscale x 1 x i8> @not_signbit_mask_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
; CHECK-LABEL: not_signbit_mask_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: not_signbit_mask_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vsra.vi v8, v8, 7
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8
; CHECK-ZVKB-NEXT: ret
%cond = icmp sgt <vscale x 1 x i8> %a, splat (i8 -1)
%r = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %b, <vscale x 1 x i8> zeroinitializer
ret <vscale x 1 x i8> %r
}