; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

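; This file checks lowering of the and-not pattern (and (xor x, -1), y) for
; 256-bit LASX vectors. For v32i8 the all-ones xor operand is folded into
; the byte-immediate form xvxori.b 255 rather than materialized in a
; register.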
define void @andn_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: andn_v32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvxori.b $xr0, $xr0, 255
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = load <32 x i8>, ptr %a1
  %v2 = xor <32 x i8> %v0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %v3 = and <32 x i8> %v2, %v1
  store <32 x i8> %v3, ptr %res
  ret void
}

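; For the wider element types the not is not folded into an immediate form;
; instead the all-ones vector is materialized with xvrepli.b $xr2, -1 (the
; all-ones bit pattern is the same at every element width) and applied with
; a full-width xvxor.v ahead of the xvand.v. The v8i32 and v4i64 cases below
; follow the same sequence.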
define void @andn_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: andn_v16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvrepli.b $xr2, -1
; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr2
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = load <16 x i16>, ptr %a1
  %v2 = xor <16 x i16> %v0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %v3 = and <16 x i16> %v2, %v1
  store <16 x i16> %v3, ptr %res
  ret void
}

define void @andn_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: andn_v8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvrepli.b $xr2, -1
; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr2
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = load <8 x i32>, ptr %a1
  %v2 = xor <8 x i32> %v0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %v3 = and <8 x i32> %v2, %v1
  store <8 x i32> %v3, ptr %res
  ret void
}

define void @andn_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: andn_v4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvrepli.b $xr2, -1
; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr2
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = load <4 x i64>, ptr %a1
  %v2 = xor <4 x i64> %v0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %v3 = and <4 x i64> %v2, %v1
  store <4 x i64> %v3, ptr %res
  ret void
}