// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: amdgpu-registered-target
// REQUIRES: x86-registered-target

// RUN: %clang_cc1 "-aux-triple" "x86_64-unknown-linux-gnu" "-triple" "spirv64-amd-amdhsa" \
// RUN: -fcuda-is-device "-aux-target-cpu" "x86-64" -emit-llvm -o - %s | FileCheck %s
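
// Verify IR generation for the __bf16 type in CUDA device code compiled for
// spirv64-amd-amdhsa: __bf16 values lower to the IR bfloat type.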

#include "Inputs/cuda.h"

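// Scalar __bf16 argument: the parameter is passed as bfloat, spilled to a
// 2-byte-aligned alloca that is addrspacecast to the generic address space
// (addrspace(4)), and stored through the generic out pointer.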
// CHECK-LABEL: @_Z8test_argPDF16bDF16b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(4), align 8
// CHECK-NEXT: [[IN_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[BF16:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr [[OUT_ADDR]] to ptr addrspace(4)
// CHECK-NEXT: [[IN_ADDR_ASCAST:%.*]] = addrspacecast ptr [[IN_ADDR]] to ptr addrspace(4)
// CHECK-NEXT: [[BF16_ASCAST:%.*]] = addrspacecast ptr [[BF16]] to ptr addrspace(4)
// CHECK-NEXT: store ptr addrspace(4) [[OUT:%.*]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT: store bfloat [[IN:%.*]], ptr addrspace(4) [[IN_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load bfloat, ptr addrspace(4) [[IN_ADDR_ASCAST]], align 2
// CHECK-NEXT: store bfloat [[TMP0]], ptr addrspace(4) [[BF16_ASCAST]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load bfloat, ptr addrspace(4) [[BF16_ASCAST]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT: store bfloat [[TMP1]], ptr addrspace(4) [[TMP2]], align 2
// CHECK-NEXT: ret void
//
__device__ void test_arg(__bf16 *out, __bf16 in) {
  __bf16 bf16 = in;
  *out = bf16;
}

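// Loading a __bf16 through a pointer: both the load and the store of the
// local copy use the bfloat IR type at 2-byte alignment.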
// CHECK-LABEL: @_Z9test_loadPDF16bS_(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(4), align 8
// CHECK-NEXT: [[IN_ADDR:%.*]] = alloca ptr addrspace(4), align 8
// CHECK-NEXT: [[BF16:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr [[OUT_ADDR]] to ptr addrspace(4)
// CHECK-NEXT: [[IN_ADDR_ASCAST:%.*]] = addrspacecast ptr [[IN_ADDR]] to ptr addrspace(4)
// CHECK-NEXT: [[BF16_ASCAST:%.*]] = addrspacecast ptr [[BF16]] to ptr addrspace(4)
// CHECK-NEXT: store ptr addrspace(4) [[OUT:%.*]], ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr addrspace(4) [[IN:%.*]], ptr addrspace(4) [[IN_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[IN_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load bfloat, ptr addrspace(4) [[TMP0]], align 2
// CHECK-NEXT: store bfloat [[TMP1]], ptr addrspace(4) [[BF16_ASCAST]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load bfloat, ptr addrspace(4) [[BF16_ASCAST]], align 2
// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT: store bfloat [[TMP2]], ptr addrspace(4) [[TMP3]], align 2
// CHECK-NEXT: ret void
//
__device__ void test_load(__bf16 *out, __bf16 *in) {
  __bf16 bf16 = *in;
  *out = bf16;
}

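// Returning a __bf16 by value: the value is returned directly as bfloat
// rather than being coerced to another type.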
// CHECK-LABEL: @_Z8test_retDF16b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[IN_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr [[RETVAL]] to ptr addrspace(4)
// CHECK-NEXT: [[IN_ADDR_ASCAST:%.*]] = addrspacecast ptr [[IN_ADDR]] to ptr addrspace(4)
// CHECK-NEXT: store bfloat [[IN:%.*]], ptr addrspace(4) [[IN_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load bfloat, ptr addrspace(4) [[IN_ADDR_ASCAST]], align 2
// CHECK-NEXT: ret bfloat [[TMP0]]
//
__device__ __bf16 test_ret(__bf16 in) {
  return in;
}

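// Calling a function that takes and returns __bf16: both the argument and
// the return value are passed directly as bfloat.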
// CHECK-LABEL: @_Z9test_callDF16b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[IN_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr [[RETVAL]] to ptr addrspace(4)
// CHECK-NEXT: [[IN_ADDR_ASCAST:%.*]] = addrspacecast ptr [[IN_ADDR]] to ptr addrspace(4)
// CHECK-NEXT: store bfloat [[IN:%.*]], ptr addrspace(4) [[IN_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load bfloat, ptr addrspace(4) [[IN_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[CALL:%.*]] = call contract spir_func noundef addrspace(4) bfloat @_Z8test_retDF16b(bfloat noundef [[TMP0]]) #[[ATTR1:[0-9]+]]
// CHECK-NEXT: ret bfloat [[CALL]]
//
__device__ __bf16 test_call(__bf16 in) {
  return test_ret(in);
}

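// Assignments of ext_vector_type __bf16 vectors with 2, 4, 8, and 16
// elements: loads and stores use the corresponding <N x bfloat> types with
// their natural alignments (4, 8, 16, and 32 bytes).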
// CHECK-LABEL: @_Z15test_vec_assignv(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VEC2_A:%.*]] = alloca <2 x bfloat>, align 4
// CHECK-NEXT: [[VEC2_B:%.*]] = alloca <2 x bfloat>, align 4
// CHECK-NEXT: [[VEC4_A:%.*]] = alloca <4 x bfloat>, align 8
// CHECK-NEXT: [[VEC4_B:%.*]] = alloca <4 x bfloat>, align 8
// CHECK-NEXT: [[VEC8_A:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NEXT: [[VEC8_B:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NEXT: [[VEC16_A:%.*]] = alloca <16 x bfloat>, align 32
// CHECK-NEXT: [[VEC16_B:%.*]] = alloca <16 x bfloat>, align 32
// CHECK-NEXT: [[VEC2_A_ASCAST:%.*]] = addrspacecast ptr [[VEC2_A]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC2_B_ASCAST:%.*]] = addrspacecast ptr [[VEC2_B]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC4_A_ASCAST:%.*]] = addrspacecast ptr [[VEC4_A]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC4_B_ASCAST:%.*]] = addrspacecast ptr [[VEC4_B]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC8_A_ASCAST:%.*]] = addrspacecast ptr [[VEC8_A]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC8_B_ASCAST:%.*]] = addrspacecast ptr [[VEC8_B]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC16_A_ASCAST:%.*]] = addrspacecast ptr [[VEC16_A]] to ptr addrspace(4)
// CHECK-NEXT: [[VEC16_B_ASCAST:%.*]] = addrspacecast ptr [[VEC16_B]] to ptr addrspace(4)
// CHECK-NEXT: [[TMP0:%.*]] = load <2 x bfloat>, ptr addrspace(4) [[VEC2_B_ASCAST]], align 4
// CHECK-NEXT: store <2 x bfloat> [[TMP0]], ptr addrspace(4) [[VEC2_A_ASCAST]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x bfloat>, ptr addrspace(4) [[VEC4_B_ASCAST]], align 8
// CHECK-NEXT: store <4 x bfloat> [[TMP1]], ptr addrspace(4) [[VEC4_A_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <8 x bfloat>, ptr addrspace(4) [[VEC8_B_ASCAST]], align 16
// CHECK-NEXT: store <8 x bfloat> [[TMP2]], ptr addrspace(4) [[VEC8_A_ASCAST]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <16 x bfloat>, ptr addrspace(4) [[VEC16_B_ASCAST]], align 32
// CHECK-NEXT: store <16 x bfloat> [[TMP3]], ptr addrspace(4) [[VEC16_A_ASCAST]], align 32
// CHECK-NEXT: ret void
//
__device__ void test_vec_assign() {
  typedef __attribute__((ext_vector_type(2))) __bf16 bf16_x2;
  bf16_x2 vec2_a, vec2_b;
  vec2_a = vec2_b;

  typedef __attribute__((ext_vector_type(4))) __bf16 bf16_x4;
  bf16_x4 vec4_a, vec4_b;
  vec4_a = vec4_b;

  typedef __attribute__((ext_vector_type(8))) __bf16 bf16_x8;
  bf16_x8 vec8_a, vec8_b;
  vec8_a = vec8_b;

  typedef __attribute__((ext_vector_type(16))) __bf16 bf16_x16;
  bf16_x16 vec16_a, vec16_b;
  vec16_a = vec16_b;
}