[SLP][NFC] Add a test showing missed splat ordering for loads
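
The test contains a scalar load (%186) that is reused as a splat operand of
several fmuls, with the results stored to consecutive 8-byte offsets. The
autogenerated checks record the current behavior: the code is left scalar,
i.e. the splat ordering for the loads is missed.
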
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/loads-ordering.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/loads-ordering.ll
new file mode 100644
index 0000000..928cbe3
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/loads-ordering.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v < %s | FileCheck %s
+
+define fastcc void @rephase(ptr %phases_in, ptr %157, i64 %158) {
+; CHECK-LABEL: define fastcc void @rephase(
+; CHECK-SAME: ptr [[PHASES_IN:%.*]], ptr [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[TMP0]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[IMAG_247:%.*]] = getelementptr i8, ptr [[IND_END11]], i64 408
+; CHECK-NEXT: [[MUL35_248:%.*]] = fmul double [[TMP2]], 0.000000e+00
+; CHECK-NEXT: store double [[MUL35_248]], ptr [[IMAG_247]], align 8
+; CHECK-NEXT: [[ARRAYIDX23_1_249:%.*]] = getelementptr i8, ptr [[IND_END11]], i64 416
+; CHECK-NEXT: [[MUL_1_250:%.*]] = fmul double [[TMP2]], 0.000000e+00
+; CHECK-NEXT: store double [[MUL_1_250]], ptr [[ARRAYIDX23_1_249]], align 8
+; CHECK-NEXT: [[IMAG_1_251:%.*]] = getelementptr i8, ptr [[IND_END11]], i64 424
+; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[IMAG_1_251]], align 8
+; CHECK-NEXT: [[MUL35_1_252:%.*]] = fmul double [[TMP2]], [[TMP3]]
+; CHECK-NEXT: store double [[MUL35_1_252]], ptr [[IMAG_1_251]], align 8
+; CHECK-NEXT: [[ARRAYIDX23_2_253:%.*]] = getelementptr i8, ptr [[IND_END11]], i64 432
+; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[ARRAYIDX23_2_253]], align 8
+; CHECK-NEXT: [[MUL_2_254:%.*]] = fmul double [[TMP2]], [[TMP4]]
+; CHECK-NEXT: store double [[MUL_2_254]], ptr [[ARRAYIDX23_2_253]], align 8
+; CHECK-NEXT: store double [[TMP2]], ptr [[PHASES_IN]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %ind.end11 = getelementptr i8, ptr %157, i64 %158
+ %186 = load double, ptr %157, align 8
+ %imag.247 = getelementptr i8, ptr %ind.end11, i64 408
+ %mul35.248 = fmul double %186, 0.000000e+00
+ store double %mul35.248, ptr %imag.247, align 8
+ %arrayidx23.1.249 = getelementptr i8, ptr %ind.end11, i64 416
+ %mul.1.250 = fmul double %186, 0.000000e+00
+ store double %mul.1.250, ptr %arrayidx23.1.249, align 8
+ %imag.1.251 = getelementptr i8, ptr %ind.end11, i64 424
+ %187 = load double, ptr %imag.1.251, align 8
+ %mul35.1.252 = fmul double %186, %187
+ store double %mul35.1.252, ptr %imag.1.251, align 8
+ %arrayidx23.2.253 = getelementptr i8, ptr %ind.end11, i64 432
+ %188 = load double, ptr %arrayidx23.2.253, align 8
+ %mul.2.254 = fmul double %186, %188
+ store double %mul.2.254, ptr %arrayidx23.2.253, align 8
+ store double %186, ptr %phases_in, align 8
+ ret void
+}
+