; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: -p --check-globals
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
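; The amdgpu-lower-module-lds pass packs each kernel's LDS variables into a
; per-kernel struct (@llvm.amdgcn.kernel.<name>.lds) and rewrites constant
; expression users of the original variables into equivalent instruction
; sequences inside each kernel, as the checks below show.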
@lds.1 = internal unnamed_addr addrspace(3) global [2 x i8] undef, align 1
; CHECK: %llvm.amdgcn.kernel.k0.lds.t = type { [2 x i8] }
; CHECK: %llvm.amdgcn.kernel.k1.lds.t = type { [2 x i8] }
; CHECK: %llvm.amdgcn.kernel.k2.lds.t = type { i32 }
; CHECK: %llvm.amdgcn.kernel.k3.lds.t = type { [32 x i8] }
; CHECK: %llvm.amdgcn.kernel.k4.lds.t = type { [2 x i8] }
; CHECK: %llvm.amdgcn.kernel.k5.lds.t = type { [505 x i32] }
; CHECK: %llvm.amdgcn.kernel.k6.lds.t = type { [4 x i32] }
; Use the same constant from different kernels
;.
; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t undef, align 2
; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t undef, align 2
; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t undef, align 4
; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t undef, align 16
; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k4.lds.t undef, align 2
; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t undef, align 16
; CHECK: @llvm.amdgcn.kernel.k6.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k6.lds.t undef, align 16
;.
define amdgpu_kernel void @k0(i64 %x) {
; CHECK-LABEL: @k0(
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k0.lds to ptr
; CHECK-NEXT: %ptr = getelementptr inbounds i8, ptr %1, i64 %x
; CHECK-NEXT: store i8 1, ptr %ptr, align 1
; CHECK-NEXT: ret void
;
%ptr = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(3) @lds.1 to ptr), i64 %x
store i8 1, ptr addrspace(0) %ptr, align 1
ret void
}
define amdgpu_kernel void @k1(i64 %x) {
; CHECK-LABEL: @k1(
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k1.lds to ptr
; CHECK-NEXT: %ptr = getelementptr inbounds i8, ptr %1, i64 %x
; CHECK-NEXT: store i8 1, ptr %ptr, align 1
; CHECK-NEXT: ret void
;
%ptr = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(3) @lds.1 to ptr), i64 %x
store i8 1, ptr addrspace(0) %ptr, align 1
ret void
}
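; Since the lowering is per kernel, @lds.1 is copied into a separate struct for
; each kernel that uses it (k0, k1 and, further down, k4) rather than shared.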
@lds.2 = internal unnamed_addr addrspace(3) global i32 undef, align 4
; Use the constant twice from the same kernel
define amdgpu_kernel void @k2(i64 %x) {
; CHECK-LABEL: @k2(
; CHECK-NEXT: store i8 1, ptr addrspace(3) @llvm.amdgcn.kernel.k2.lds, align 4
; CHECK-NEXT: store i8 2, ptr addrspace(3) @llvm.amdgcn.kernel.k2.lds, align 4
; CHECK-NEXT: ret void
;
store i8 1, ptr addrspace(3) @lds.2, align 4
store i8 2, ptr addrspace(3) @lds.2, align 4
ret void
}
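; Direct addrspace(3) accesses like these need no addrspacecast; the stores are
; rewritten to address @llvm.amdgcn.kernel.k2.lds itself.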
@lds.3 = internal unnamed_addr addrspace(3) global [32 x i8] undef, align 1
; Use the constant twice from the same kernel, via two different constant expressions.
define amdgpu_kernel void @k3(i64 %x) {
; CHECK-LABEL: @k3(
; CHECK-NEXT: %1 = getelementptr inbounds [32 x i8], ptr addrspace(3) @llvm.amdgcn.kernel.k3.lds, i32 0, i32 16
; CHECK-NEXT: %ptr1 = addrspacecast ptr addrspace(3) %1 to ptr
; CHECK-NEXT: store i64 1, ptr %ptr1, align 1
; CHECK-NEXT: %2 = getelementptr inbounds [32 x i8], ptr addrspace(3) @llvm.amdgcn.kernel.k3.lds, i32 0, i32 24
; CHECK-NEXT: %ptr2 = addrspacecast ptr addrspace(3) %2 to ptr
; CHECK-NEXT: store i64 2, ptr %ptr2, align 8
; CHECK-NEXT: ret void
;
%ptr1 = addrspacecast ptr addrspace(3) getelementptr inbounds ([32 x i8], ptr addrspace(3) @lds.3, i32 0, i32 16) to ptr
store i64 1, ptr %ptr1, align 1
%ptr2 = addrspacecast ptr addrspace(3) getelementptr inbounds ([32 x i8], ptr addrspace(3) @lds.3, i32 0, i32 24) to ptr
store i64 2, ptr %ptr2, align 8
ret void
}
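; Each distinct constant expression GEP above is expanded to its own
; getelementptr instruction on the per-kernel struct before the addrspacecast.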
; @lds.1 is used from constant expressions in different kernels.
define amdgpu_kernel void @k4(i64 %x) {
; CHECK-LABEL: @k4(
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k4.lds to ptr
; CHECK-NEXT: %ptr = getelementptr inbounds i8, ptr %1, i64 %x
; CHECK-NEXT: store i8 1, ptr %ptr, align 1
; CHECK-NEXT: ret void
;
%ptr = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(3) @lds.1 to ptr), i64 %x
store i8 1, ptr addrspace(0) %ptr, align 1
ret void
}
@lds.4 = internal unnamed_addr addrspace(3) global [505 x i32] undef, align 4
; Multiple constexpr uses in the same instruction.
define amdgpu_kernel void @k5() {
; CHECK-LABEL: @k5(
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(3) @llvm.amdgcn.kernel.k5.lds to ptr
; CHECK-NEXT: call void undef(ptr %1, ptr %2)
; CHECK-NEXT: ret void
;
call void undef(ptr addrspacecast (ptr addrspace(3) @lds.4 to ptr), ptr addrspacecast (ptr addrspace(3) @lds.4 to ptr))
ret void
}
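; The two identical constant expression operands of the call are each expanded
; to their own addrspacecast instruction; they are not deduplicated.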
@lds.5 = internal addrspace(3) global [4 x i32] undef, align 4
; Both the *value* and *pointer* operands of the store instruction are constant expressions, and
; both of these constant expression paths use the same LDS variable, @lds.5. Hence both constant
; expression operands of the store should be replaced by equivalent instruction sequences.
define amdgpu_kernel void @k6() {
; CHECK-LABEL: @k6(
; CHECK-NEXT: %1 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2
; CHECK-NEXT: %2 = ptrtoint ptr addrspace(3) %1 to i32
; CHECK-NEXT: %3 = getelementptr inbounds [4 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.k6.lds, i32 0, i32 2
; CHECK-NEXT: store i32 %2, ptr addrspace(3) %3, align 8
; CHECK-NEXT: ret void
;
store i32 ptrtoint (ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @lds.5, i32 0, i32 2) to i32), ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @lds.5, i32 0, i32 2)
ret void
}
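; Note: the rewritten store is checked with align 8, which matches element 2
; sitting at byte offset 8 of the 16-byte-aligned @llvm.amdgcn.kernel.k6.lds.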