// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbr -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV64ZBR
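//
// Quick reference for the builtins under test: the crc32_* builtins use the
// CRC-32 polynomial and the crc32c_* builtins the CRC-32C (Castagnoli)
// polynomial; the _b/_h/_w/_d suffix selects the operand width (byte,
// halfword, word, doubleword). Each builtin should lower to the matching
// llvm.riscv.crc32*.i64 intrinsic. The IR is unoptimized, so each function
// spills its argument to an alloca and reloads it before the intrinsic call.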

// RV64ZBR-LABEL: @crc32_b(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.b.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32_b(long a) {
  return __builtin_riscv_crc32_b(a);
}

// RV64ZBR-LABEL: @crc32_h(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.h.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32_h(long a) {
  return __builtin_riscv_crc32_h(a);
}

// RV64ZBR-LABEL: @crc32_w(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.w.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32_w(long a) {
  return __builtin_riscv_crc32_w(a);
}

// RV64ZBR-LABEL: @crc32c_b(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.b.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32c_b(long a) {
  return __builtin_riscv_crc32c_b(a);
}

// RV64ZBR-LABEL: @crc32c_h(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.h.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32c_h(long a) {
  return __builtin_riscv_crc32c_h(a);
}

// RV64ZBR-LABEL: @crc32c_w(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.w.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32c_w(long a) {
  return __builtin_riscv_crc32c_w(a);
}
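//
// crc32.d and crc32c.d operate on a full 64-bit doubleword, so the two
// builtins below are only available on RV64 (hence the riscv64 triple in
// the RUN line).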

// RV64ZBR-LABEL: @crc32_d(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.d.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32_d(long a) {
  return __builtin_riscv_crc32_d(a);
}

// RV64ZBR-LABEL: @crc32c_d(
// RV64ZBR-NEXT:  entry:
// RV64ZBR-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// RV64ZBR-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// RV64ZBR-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.d.i64(i64 [[TMP0]])
// RV64ZBR-NEXT:    ret i64 [[TMP1]]
//
long crc32c_d(long a) {
  return __builtin_riscv_crc32c_d(a);
}
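//
// To regenerate the assertions above after a codegen change, rerun
// utils/update_cc_test_checks.py on this file.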