; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine %s | FileCheck %s
; RUN: opt -S -passes=instcombine %s \
; RUN:   -use-constant-int-for-fixed-length-splat \
; RUN:   -use-constant-fp-for-fixed-length-splat \
; RUN:   -use-constant-int-for-scalable-splat \
; RUN:   -use-constant-fp-for-scalable-splat | FileCheck %s

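; The udiv is folded into the inserted subvector: 9 / 3 == 3; the poison destination is unchanged.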
define <vscale x 4 x i32> @insert_div() {
; CHECK-LABEL: @insert_div(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 3), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
  ret <vscale x 4 x i32> %div
}

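; Same fold with the splat dividend on the LHS: 10 / 2 == 5 and 10 / 5 == 2.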
define <vscale x 4 x i32> @insert_div_splat_lhs() {
; CHECK-LABEL: @insert_div_splat_lhs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 5), <4 x i32> splat (i32 2), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 2), <4 x i32> splat (i32 5), i64 0)
  %div = udiv <vscale x 4 x i32> splat (i32 10), %0
  ret <vscale x 4 x i32> %div
}

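; Both the destination vector and the inserted subvector are splats: 18 / 3 == 6 and 9 / 3 == 3.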
define <vscale x 4 x i32> @insert_div_mixed_splat() {
; CHECK-LABEL: @insert_div_mixed_splat(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 6), <4 x i32> splat (i32 3), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 18), <4 x i32> splat (i32 9), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
  ret <vscale x 4 x i32> %div
}

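; The fold also applies with a non-zero insert index: 1 * 7 == 7.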
define <vscale x 4 x i32> @insert_mul() {
; CHECK-LABEL: @insert_mul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MUL:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 7), i64 4)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 1), i64 4)
  %mul = mul <vscale x 4 x i32> %0, splat (i32 7)
  ret <vscale x 4 x i32> %mul
}

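; add folds the same way: 5 + 11 == 16.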
define <vscale x 4 x i32> @insert_add() {
; CHECK-LABEL: @insert_add(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 16), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 5), i64 0)
  %add = add <vscale x 4 x i32> %0, splat (i32 11)
  ret <vscale x 4 x i32> %add
}

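; A non-splat fixed-length subvector is folded element-wise.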
define <vscale x 4 x i32> @insert_add_non_splat_subvector() {
; CHECK-LABEL: @insert_add_non_splat_subvector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 101, i32 102, i32 103, i32 104>, i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i64 0)
  %add = add <vscale x 4 x i32> %0, splat (i32 100)
  ret <vscale x 4 x i32> %add
}

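; Floating-point constants fold too: 1.25 + 5.0 == 6.25 and 0.5 + 5.0 == 5.5.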
define <vscale x 4 x float> @insert_add_fp() {
; CHECK-LABEL: @insert_add_fp(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> splat (float 6.250000e+00), <4 x float> splat (float 5.500000e+00), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x float> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> splat (float 1.25), <4 x float> splat (float 0.5), i64 0)
  %add = fadd <vscale x 4 x float> %0, splat (float 5.0)
  ret <vscale x 4 x float> %add
}

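; A scalable subvector inserted into a wider scalable vector also folds: 16 + 4 == 20 and -8 + 4 == -4.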
define <vscale x 8 x i32> @insert_add_scalable_subvector() {
; CHECK-LABEL: @insert_add_scalable_subvector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 20), <vscale x 4 x i32> splat (i32 -4), i64 0)
; CHECK-NEXT:    ret <vscale x 8 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 16), <vscale x 4 x i32> splat (i32 -8), i64 0)
  %add = add <vscale x 8 x i32> %0, splat (i32 4)
  ret <vscale x 8 x i32> %add
}

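; Adding the negated splat cancels the inserted value, leaving a zeroinitializer subvector.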
define <vscale x 4 x i32> @insert_sub() {
; CHECK-LABEL: @insert_sub(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SUB:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> zeroinitializer, i64 8)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 11), i64 8)
  %sub = add <vscale x 4 x i32> %0, splat (i32 -11)
  ret <vscale x 4 x i32> %sub
}

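; With an undef destination, the and folds the destination to zero and the subvector to 6 & 4 == 4.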
define <vscale x 4 x i32> @insert_and_partially_undef() {
; CHECK-LABEL: @insert_and_partially_undef(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AND:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> zeroinitializer, <4 x i32> splat (i32 4), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[AND]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> splat (i32 6), i64 0)
  %and = and <vscale x 4 x i32> %0, splat (i32 4)
  ret <vscale x 4 x i32> %and
}

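; A chain of binary ops folds through the same vector.insert: (21 / 3) + 4 == 11 and (12 / 3) + 4 == 8.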
define <vscale x 4 x i32> @insert_fold_chain() {
; CHECK-LABEL: @insert_fold_chain(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 11), <4 x i32> splat (i32 8), i64 0)
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 21), <4 x i32> splat (i32 12), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
  %add = add <vscale x 4 x i32> %div, splat (i32 4)
  ret <vscale x 4 x i32> %add
}

; TODO: The add of two vector.inserts with the same insert index could itself be
; folded into a single vector.insert (splat (i32 9) destination with a splat (i32 7) subvector).
define <vscale x 4 x i32> @insert_add_both_insert_vector() {
; CHECK-LABEL: @insert_add_both_insert_vector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 10), <4 x i32> splat (i32 5), i64 0)
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 -1), <4 x i32> splat (i32 2), i64 0)
; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
;
entry:
  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 10), <4 x i32> splat (i32 5), i64 0)
  %1 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 -1), <4 x i32> splat (i32 2), i64 0)
  %add = add <vscale x 4 x i32> %0, %1
  ret <vscale x 4 x i32> %add
}