[RISCV][test] Add multi-use and vector tests for (add x, C) -> (sub x, -C)
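
The new tests cover cases that the existing scalar tests in
add-imm64-to-sub.ll do not:

* add-imm64-to-sub.ll: an add whose constant has a second use, so it is
  materialized once and shared by both adds. Also rename %xor to %and in
  the existing and-user test so the value name matches the operation.
* rvv/fixed-vectors-int-splat.ll and rvv/vadd-sdnode.ll: fixed-length and
  scalable vector adds of a splat constant, with the splat as either
  operand.
* rvv/fixed-vectors-vadd-vp.ll: the same splat patterns through
  llvm.vp.add.

All of the new tests use the constant -1099511627775 (0xFFFFFF0000000001),
which RV64 currently materializes with three instructions:

  li    a0, -1
  slli  a0, a0, 40
  addi  a0, a0, 1

Its negation, 1099511627775 (0xFFFFFFFFFF), should be cheaper to build
(for example, an srli of all-ones), so these are candidates for the
add -> sub rewrite.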
diff --git a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
index ddcf4e1..8c251c0 100644
--- a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
+++ b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
@@ -56,6 +56,22 @@
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
%add = add i64 %x, -1099511627775
- %xor = and i64 %add, -1099511627775
+ %and = and i64 %add, -1099511627775
+ ret i64 %and
+}
+
+define i64 @add_multiuse_const(i64 %x, i64 %y) {
+; CHECK-LABEL: add_multiuse_const:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: slli a2, a2, 40
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: ret
+ %a = add i64 %x, -1099511627775
+ %b = add i64 %y, -1099511627775
+ %xor = xor i64 %a, %b
ret i64 %xor
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index b6490e0..1c62e0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -431,3 +431,61 @@
store <16 x i64> %vc, ptr %c
ret void
}
+
+define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va) {
+; RV32-LABEL: vadd_vx_v2i64_to_sub:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: li a0, -256
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: slli a0, a0, 40
+; RV64-NEXT: addi a0, a0, 1
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %v = add <2 x i64> splat (i64 -1099511627775), %va
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va) {
+; RV32-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: li a0, -256
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: slli a0, a0, 40
+; RV64-NEXT: addi a0, a0, 1
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %v = add <2 x i64> %va, splat (i64 -1099511627775)
+ ret <2 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 7ee8179..1151123 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -1425,3 +1425,59 @@
%v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
ret <32 x i64> %v
}
+
+define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) nounwind {
+; RV32-LABEL: vadd_vx_v2i64_to_sub:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a1, -256
+; RV32-NEXT: li a2, 1
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: slli a1, a1, 40
+; RV64-NEXT: addi a1, a1, 1
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a1, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> splat (i64 -1099511627775), <2 x i64> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) nounwind {
+; RV32-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a1, -256
+; RV32-NEXT: li a2, 1
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: slli a1, a1, 40
+; RV64-NEXT: addi a1, a1, 1
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a1, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1099511627775), <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
index ac22e11..a95ad7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -865,3 +865,57 @@
%vd = add <vscale x 8 x i32> %vc, %vs
ret <vscale x 8 x i32> %vd
}
+
+define <vscale x 1 x i64> @vadd_vx_imm64_to_sub(<vscale x 1 x i64> %va) nounwind {
+; RV32-LABEL: vadd_vx_imm64_to_sub:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a0, -256
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_imm64_to_sub:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: slli a0, a0, 40
+; RV64-NEXT: addi a0, a0, 1
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %vc = add <vscale x 1 x i64> splat (i64 -1099511627775), %va
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vadd_vx_imm64_to_sub_swapped(<vscale x 1 x i64> %va) nounwind {
+; RV32-LABEL: vadd_vx_imm64_to_sub_swapped:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a0, -256
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_imm64_to_sub_swapped:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: slli a0, a0, 40
+; RV64-NEXT: addi a0, a0, 1
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %vc = add <vscale x 1 x i64> %va, splat (i64 -1099511627775)
+ ret <vscale x 1 x i64> %vc
+}