| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py |
| // RUN: %clang_cc1 -O3 -triple powerpc64le-unknown-unknown -target-cpu future \ |
| // RUN: -emit-llvm %s -o - | FileCheck %s |
| // RUN: %clang_cc1 -O3 -triple powerpc64-ibm-aix -target-cpu future \ |
| // RUN: -emit-llvm %s -o - | FileCheck %s |
| |
| |
| // CHECK-LABEL: @test_dmxvi8gerx4( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[TMP0:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2:![0-9]+]] |
| // CHECK-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvi8gerx4(<256 x i1> [[TMP0]], <16 x i8> [[VC:%.*]]) |
| // CHECK-NEXT: store <1024 x i1> [[TMP1]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6:![0-9]+]] |
| // CHECK-NEXT: ret void |
| // |
// Exercises the non-accumulating __builtin_mma_dmxvi8gerx4 builtin: loads a
// __vector_pair operand from raw bytes, computes into a __dmr1024 accumulator,
// and stores the 1024-bit result. Per the CHECK lines, the initial accumulator
// load is optimized away at -O3 since this variant overwrites the accumulator.
void test_dmxvi8gerx4(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);   // dead at -O3 (non-accumulating form)
  __vector_pair vp = *((__vector_pair *)vpp);
  __builtin_mma_dmxvi8gerx4(&vdmr, vp, vc);
  *((__dmr1024 *)resp) = vdmr;
}
| |
| // CHECK-LABEL: @test_pmdmxvi8gerx4( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[TMP0:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]] |
| // CHECK-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvi8gerx4(<256 x i1> [[TMP0]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0) |
| // CHECK-NEXT: store <1024 x i1> [[TMP1]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: ret void |
| // |
// Exercises the "pm" (masked) non-accumulating variant,
// __builtin_mma_pmdmxvi8gerx4, with all three immediate mask operands set to
// 0; the CHECK lines verify they are lowered as i32 0 intrinsic arguments.
// As above, the initial accumulator load is dead and elided at -O3.
void test_pmdmxvi8gerx4(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);   // dead at -O3 (non-accumulating form)
  __vector_pair vp = *((__vector_pair *)vpp);
  __builtin_mma_pmdmxvi8gerx4(&vdmr, vp, vc, 0, 0, 0);
  *((__dmr1024 *)resp) = vdmr;
}
| |
| // CHECK-LABEL: @test_dmxvi8gerx4pp( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]] |
| // CHECK-NEXT: [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvi8gerx4pp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]]) |
| // CHECK-NEXT: store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: ret void |
| // |
// Exercises the accumulating "pp" variant, __builtin_mma_dmxvi8gerx4pp.
// Unlike the non-accumulating form, the CHECK lines show the <1024 x i1>
// accumulator IS loaded first and passed into the intrinsic, since this
// variant reads the existing accumulator value.
void test_dmxvi8gerx4pp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);   // live: accumulating variant reads it
  __vector_pair vp = *((__vector_pair *)vpp);
  __builtin_mma_dmxvi8gerx4pp(&vdmr, vp, vc);
  *((__dmr1024 *)resp) = vdmr;
}
| |
| // CHECK-LABEL: @test_pmdmxvi8gerx4pp( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]] |
| // CHECK-NEXT: [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvi8gerx4pp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0) |
| // CHECK-NEXT: store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: ret void |
| // |
// Exercises the masked accumulating variant, __builtin_mma_pmdmxvi8gerx4pp,
// with zero immediates. The CHECK lines verify both the accumulator load
// (<1024 x i1>) and the three trailing i32 0 mask operands on the intrinsic.
void test_pmdmxvi8gerx4pp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);   // live: accumulating variant reads it
  __vector_pair vp = *((__vector_pair *)vpp);
  __builtin_mma_pmdmxvi8gerx4pp(&vdmr, vp, vc, 0, 0, 0);
  *((__dmr1024 *)resp) = vdmr;
}
| |
| // CHECK-LABEL: @test_dmxvi8gerx4spp( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]] |
| // CHECK-NEXT: [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvi8gerx4spp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]]) |
| // CHECK-NEXT: store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: ret void |
| // |
// Exercises the "spp" accumulating variant, __builtin_mma_dmxvi8gerx4spp.
// Structurally identical codegen to the "pp" test: accumulator is loaded,
// passed to the intrinsic together with the vector pair and vc, and the
// 1024-bit result is stored back.
void test_dmxvi8gerx4spp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);   // live: accumulating variant reads it
  __vector_pair vp = *((__vector_pair *)vpp);
  __builtin_mma_dmxvi8gerx4spp(&vdmr, vp, vc);
  *((__dmr1024 *)resp) = vdmr;
}
| |
| // CHECK-LABEL: @test_pmdmxvi8gerx4spp( |
| // CHECK-NEXT: entry: |
| // CHECK-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]] |
| // CHECK-NEXT: [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvi8gerx4spp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0) |
| // CHECK-NEXT: store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]] |
| // CHECK-NEXT: ret void |
| // |
// Exercises the masked "spp" accumulating variant,
// __builtin_mma_pmdmxvi8gerx4spp, with zero immediates; the CHECK lines
// verify the accumulator load and the three i32 0 mask operands.
void test_pmdmxvi8gerx4spp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);   // live: accumulating variant reads it
  __vector_pair vp = *((__vector_pair *)vpp);
  __builtin_mma_pmdmxvi8gerx4spp(&vdmr, vp, vc, 0, 0, 0);
  *((__dmr1024 *)resp) = vdmr;
}