// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s

typedef int v8i __attribute__((ext_vector_type(8)));
typedef _Bool v8b __attribute__((ext_vector_type(8)));

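// A v8b mask is coerced to an i8 for argument passing and bitcast back to
// <8 x i1> before feeding each masked memory intrinsic below.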
// CHECK-LABEL: define dso_local <8 x i32> @test_load(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP0]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 32, <8 x i1> [[TMP1]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
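// The two-argument form of __builtin_masked_load lowers to @llvm.masked.load
// with the vector's natural 32-byte alignment and a poison passthru for the
// masked-off lanes.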
v8i test_load(v8b m, v8i *p) {
  return __builtin_masked_load(m, p);
}

// CHECK-LABEL: define dso_local <8 x i32> @test_load_passthru(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[T_ADDR:%.*]] = alloca <8 x i32>, align 32
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[T:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: store <8 x i32> [[T]], ptr [[T_ADDR]], align 32
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32
// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 32, <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
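// When a passthru vector is given, it becomes the final operand of
// @llvm.masked.load and supplies the result for lanes where the mask is false.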
v8i test_load_passthru(v8b m, v8i *p, v8i t) {
  return __builtin_masked_load(m, p, t);
}

// CHECK-LABEL: define dso_local <8 x i32> @test_load_expand(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef [[P:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[T_ADDR:%.*]] = alloca <8 x i32>, align 32
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[T:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: store <8 x i32> [[T]], ptr [[T_ADDR]], align 32
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32
// CHECK-NEXT: [[MASKED_EXPAND_LOAD:%.*]] = call <8 x i32> @llvm.masked.expandload.v8i32(ptr [[TMP3]], <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
// CHECK-NEXT: ret <8 x i32> [[MASKED_EXPAND_LOAD]]
//
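// __builtin_masked_expand_load lowers to @llvm.masked.expandload, which reads
// consecutive elements from memory and expands them into the enabled lanes.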
v8i test_load_expand(v8b m, v8i *p, v8i t) {
  return __builtin_masked_expand_load(m, p, t);
}

// CHECK-LABEL: define dso_local void @test_store(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr noundef [[P:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[V_ADDR:%.*]] = alloca <8 x i32>, align 32
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[V:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <8 x i32> [[V]], ptr [[V_ADDR]], align 32
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 32, <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
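// __builtin_masked_store lowers to @llvm.masked.store; only the lanes enabled
// by the mask are written to memory.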
void test_store(v8b m, v8i v, v8i *p) {
  __builtin_masked_store(m, v, p);
}

// CHECK-LABEL: define dso_local void @test_compress_store(
// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr noundef [[P:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1
// CHECK-NEXT: [[V_ADDR:%.*]] = alloca <8 x i32>, align 32
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1
// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1
// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1>
// CHECK-NEXT: [[V:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8
// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1
// CHECK-NEXT: store <8 x i32> [[V]], ptr [[V_ADDR]], align 32
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: call void @llvm.masked.compressstore.v8i32(<8 x i32> [[TMP3]], ptr [[TMP4]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
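// __builtin_masked_compress_store lowers to @llvm.masked.compressstore, which
// packs the enabled lanes into consecutive memory locations.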
void test_compress_store(v8b m, v8i v, v8i *p) {
  __builtin_masked_compress_store(m, v, p);
}