| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 |
| ; RUN: opt -passes=gvn -S %s | FileCheck %s |
| |
; The two column-major loads read the 8 contiguous doubles at [src+8, src+16)
; (stride 4, 4x2), while the intervening matrix store writes [src, src+8).
; The regions do not overlap, so GVN is expected to remove the second load
; and feed %l to both @use calls (see the CHECK lines).
define void @redundant_unstrided_load(ptr %src) {
; CHECK-LABEL: define void @redundant_unstrided_load(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 8
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[SRC]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
  %src.offset = getelementptr inbounds double, ptr %src, i32 8
  %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2)
  ; Use the fully mangled .i32 name (stride operand overload) so the call
  ; matches the declaration below and the autogenerated CHECK line above.
  call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %src, i32 4, i1 false, i32 4, i32 2)
  %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2)
  call void @use(<8 x double> %l)
  call void @use(<8 x double> %l.2)
  ret void
}
| |
; Same as @redundant_unstrided_load, but the clobber between the two loads is
; a plain scalar store. The store writes the single double at %src, which does
; not overlap the loaded region [src+1, src+9), so GVN is expected to remove
; the second load and pass %l to both @use calls (see the CHECK lines).
define void @redundant_unstrided_load_non_matrix_store(ptr %src) {
; CHECK-LABEL: define void @redundant_unstrided_load_non_matrix_store(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 1
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: store double 4.200000e+01, ptr [[SRC]], align 8
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
  %src.offset = getelementptr inbounds double, ptr %src, i32 1
  %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2)
  store double 42.0, ptr %src
  %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2)
  call void @use(<8 x double> %l)
  call void @use(<8 x double> %l.2)
  ret void
}
| |
; Strided variant: with stride 8 the loads read the two 4-element columns
; starting at src+16 and src+24, while the matrix store writes the columns
; starting at src and src+8. The accessed regions do not overlap, so GVN is
; expected to remove the second load (see the CHECK lines).
define void @redundant_strided_load(ptr %src) {
; CHECK-LABEL: define void @redundant_strided_load(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 16
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[SRC]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
  %src.offset = getelementptr inbounds double, ptr %src, i32 16
  %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2)
  ; Use the fully mangled .i32 name (stride operand overload) so the call
  ; matches the declaration below and the autogenerated CHECK line above.
  call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2)
  %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2)
  call void @use(<8 x double> %l)
  call void @use(<8 x double> %l.2)
  ret void
}
| |
; Same as @redundant_strided_load, but the clobber between the two loads is a
; plain scalar store to %src. The stride-8 loads access [src+16, src+20) and
; [src+24, src+28), which do not overlap the stored double, so GVN is expected
; to remove the second load and pass %l to both @use calls (see CHECK lines).
define void @redundant_strided_load_non_matrix_store(ptr %src) {
; CHECK-LABEL: define void @redundant_strided_load_non_matrix_store(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 16
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: store double 4.200000e+01, ptr [[SRC]], align 8
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
  %src.offset = getelementptr inbounds double, ptr %src, i32 16
  %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 8, i32 2)
  store double 42.0, ptr %src
  %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 8, i32 2)
  call void @use(<8 x double> %l)
  call void @use(<8 x double> %l.2)
  ret void
}
| |
; Negative test: the two loads read from the same pointer but with different
; matrix dimensions (4x2 vs. 3x3), and the second result is projected down to
; 8 elements with a shufflevector. The CHECK lines expect BOTH loads to remain
; — GVN must not treat the differently-shaped loads as redundant, even though
; for these strides the first 8 loaded elements happen to come from the same
; memory.
define void @repeat_load_dimension_change_project(ptr %src) {
; CHECK-LABEL: define void @repeat_load_dimension_change_project(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L_2:%.*]] = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr [[SRC]], i32 3, i1 false, i32 3, i32 3)
; CHECK-NEXT: [[L_3:%.*]] = shufflevector <9 x double> [[L_2]], <9 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: call void @use(<8 x double> [[L_3]])
; CHECK-NEXT: ret void
;
entry:
  %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2)
  %l.2 = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr %src, i32 3, i1 false, i32 3, i32 3)
  ; Select elements 0..7 of %l.2 (the zeroinitializer operand is unused here).
  %l.3 = shufflevector <9 x double> %l.2, <9 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  call void @use(<8 x double> %l)
  call void @use(<8 x double> %l.3)
  ret void
}
| |
; Negative test: like @repeat_load_dimension_change_project, but the shuffle
; mask <8..15> selects the last element of %l.2 followed by seven zeros from
; the zeroinitializer operand, so %l.3 is clearly not equivalent to %l. The
; CHECK lines expect both loads and the shuffle to remain untouched.
define void @repeat_load_dimension_change_shuffle(ptr %src) {
; CHECK-LABEL: define void @repeat_load_dimension_change_shuffle(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L_2:%.*]] = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr [[SRC]], i32 3, i1 false, i32 3, i32 3)
; CHECK-NEXT: [[L_3:%.*]] = shufflevector <9 x double> [[L_2]], <9 x double> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: call void @use(<8 x double> [[L_3]])
; CHECK-NEXT: ret void
;
entry:
  %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2)
  %l.2 = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr %src, i32 3, i1 false, i32 3, i32 3)
  ; Index 8 is %l.2[8]; indices 9..15 pull zeros from the second operand.
  %l.3 = shufflevector <9 x double> %l.2, <9 x double> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  call void @use(<8 x double> %l)
  call void @use(<8 x double> %l.3)
  ret void
}
| |
; Overloaded matrix intrinsics: the mangled suffix encodes the vector type
; (v8f64/v9f64) and the i32 stride-operand type.
declare <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr, i32, i1, i32, i32)
declare <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr, i32, i1, i32, i32)
declare void @llvm.matrix.column.major.store.v8f64.i32(<8 x double>, ptr, i32, i1, i32, i32)
declare void @use(<8 x double>)