blob: 0768510e6f23cd5dd1ace9b59ffb9d12a6a0f9db [file] [log] [blame]
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zknh -emit-llvm %s -o - \
// RUN: | FileCheck %s -check-prefix=RV32ZKNH
// RV32ZKNH-LABEL: @sha256sig0(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0.i32(i32 [[TMP0]])
// RV32ZKNH-NEXT: ret i32 [[TMP1]]
//
// Exercises the RV32 Zknh sha256sig0 builtin. The CHECK lines above expect
// this to lower to a call of @llvm.riscv.sha256sig0.i32 (long is i32 on
// riscv32 per the generated stores/loads). Body must remain a single return
// statement so the autogenerated -O0 IR keeps matching.
long sha256sig0(long rs1) {
return __builtin_riscv_sha256sig0(rs1);
}
// RV32ZKNH-LABEL: @sha256sig1(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1.i32(i32 [[TMP0]])
// RV32ZKNH-NEXT: ret i32 [[TMP1]]
//
// Exercises the RV32 Zknh sha256sig1 builtin; expected lowering is
// @llvm.riscv.sha256sig1.i32 per the CHECK lines above. Keep the body a
// single return — extra locals would change the -O0 IR and break the
// autogenerated assertions.
long sha256sig1(long rs1) {
return __builtin_riscv_sha256sig1(rs1);
}
// RV32ZKNH-LABEL: @sha256sum0(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0.i32(i32 [[TMP0]])
// RV32ZKNH-NEXT: ret i32 [[TMP1]]
//
// Exercises the RV32 Zknh sha256sum0 builtin; expected lowering is
// @llvm.riscv.sha256sum0.i32 per the CHECK lines above. NOTE(review): the
// sha256* wrappers use `long` while the sha512* ones below use `int`; both
// are i32 on riscv32, so the generated IR is unaffected.
long sha256sum0(long rs1) {
return __builtin_riscv_sha256sum0(rs1);
}
// RV32ZKNH-LABEL: @sha256sum1(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1.i32(i32 [[TMP0]])
// RV32ZKNH-NEXT: ret i32 [[TMP1]]
//
// Exercises the RV32 Zknh sha256sum1 builtin; expected lowering is
// @llvm.riscv.sha256sum1.i32 per the CHECK lines above. Body intentionally
// a single return so the autogenerated -O0 IR keeps matching.
long sha256sum1(long rs1) {
return __builtin_riscv_sha256sum1(rs1);
}
// RV32ZKNH-LABEL: @sha512sig0h(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZKNH-NEXT: ret i32 [[TMP2]]
//
// Exercises the two-operand RV32 Zknh sha512sig0h builtin (the _32 suffix
// marks the RV32-only split form of the 64-bit sigma operation); expected
// lowering is @llvm.riscv.sha512sig0h per the CHECK lines above.
int sha512sig0h(int rs1, int rs2) {
return __builtin_riscv_sha512sig0h_32(rs1, rs2);
}
// RV32ZKNH-LABEL: @sha512sig0l(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZKNH-NEXT: ret i32 [[TMP2]]
//
// Exercises the two-operand RV32 Zknh sha512sig0l builtin; expected
// lowering is @llvm.riscv.sha512sig0l per the CHECK lines above. Body must
// stay a single return to keep the autogenerated -O0 IR stable.
int sha512sig0l(int rs1, int rs2) {
return __builtin_riscv_sha512sig0l_32(rs1, rs2);
}
// RV32ZKNH-LABEL: @sha512sig1h(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZKNH-NEXT: ret i32 [[TMP2]]
//
// Exercises the two-operand RV32 Zknh sha512sig1h builtin; expected
// lowering is @llvm.riscv.sha512sig1h per the CHECK lines above.
int sha512sig1h(int rs1, int rs2) {
return __builtin_riscv_sha512sig1h_32(rs1, rs2);
}
// RV32ZKNH-LABEL: @sha512sig1l(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZKNH-NEXT: ret i32 [[TMP2]]
//
// Exercises the two-operand RV32 Zknh sha512sig1l builtin; expected
// lowering is @llvm.riscv.sha512sig1l per the CHECK lines above.
int sha512sig1l(int rs1, int rs2) {
return __builtin_riscv_sha512sig1l_32(rs1, rs2);
}
// RV32ZKNH-LABEL: @sha512sum0r(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZKNH-NEXT: ret i32 [[TMP2]]
//
// Exercises the two-operand RV32 Zknh sha512sum0r builtin; expected
// lowering is @llvm.riscv.sha512sum0r per the CHECK lines above.
int sha512sum0r(int rs1, int rs2) {
return __builtin_riscv_sha512sum0r_32(rs1, rs2);
}
// RV32ZKNH-LABEL: @sha512sum1r(
// RV32ZKNH-NEXT: entry:
// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[TMP0]], i32 [[TMP1]])
// RV32ZKNH-NEXT: ret i32 [[TMP2]]
//
// Exercises the two-operand RV32 Zknh sha512sum1r builtin; expected
// lowering is @llvm.riscv.sha512sum1r per the CHECK lines above. Body must
// stay a single return so the autogenerated -O0 IR keeps matching.
int sha512sum1r(int rs1, int rs2) {
return __builtin_riscv_sha512sum1r_32(rs1, rs2);
}