blob: 871ced11e1a92671461c47e7f0054f6e4e0e2132 [file] [log] [blame]
; RUN: opt %loadPolly -polly-codegen-ppcg -polly-acc-dump-code \
; RUN: -disable-output < %s | \
; RUN: FileCheck -check-prefix=CODE %s
; REQUIRES: pollyacc
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(half), cudaMemcpyHostToDevice));
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(half), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, dev_MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(half), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(half A[], half b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; A[i] += b over 1024 'half' elements: for (i = 0; i < 1024; i++) A[i] += b.
; The IR names are load-bearing: %A/%b become MemRef_A/MemRef_b and block bb2
; becomes Stmt_bb2 in the generated code matched by the CODE checks above.
define void @half(half* %A, half %b) {
bb:
br label %bb1
; Loop header: canonical counted loop with an i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body (the SCoP statement): load A[i], fadd b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds half, half* %A, i64 %i.0
%tmp3 = load half, half* %tmp, align 4
%tmp4 = fadd half %tmp3, %b
store half %tmp4, half* %tmp, align 4
br label %bb5
; Loop latch: increment and continue.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(float), cudaMemcpyHostToDevice));
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(float), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, dev_MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(float), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(float A[], float b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; A[i] += b over 1024 'float' elements; same shape as @half above.
; %A/%b/bb2 names feed the MemRef_A/MemRef_b/Stmt_bb2 identifiers in the
; CODE checks above -- keep them stable.
define void @float(float* %A, float %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], fadd b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds float, float* %A, i64 %i.0
%tmp3 = load float, float* %tmp, align 4
%tmp4 = fadd float %tmp3, %b
store float %tmp4, float* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(double), cudaMemcpyHostToDevice));
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(double), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, dev_MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(double), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(double A[], double b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; A[i] += b over 1024 'double' elements; same shape as @half above.
; %A/%b/bb2 names feed the MemRef_A/MemRef_b/Stmt_bb2 identifiers in the
; CODE checks above -- keep them stable.
define void @double(double* %A, double %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], fadd b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds double, double* %A, i64 %i.0
%tmp3 = load double, double* %tmp, align 4
%tmp4 = fadd double %tmp3, %b
store double %tmp4, double* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(fp128), cudaMemcpyHostToDevice));
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(fp128), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, dev_MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(fp128), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(fp128 A[], fp128 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; A[i] += b over 1024 'fp128' (quad-precision) elements; same shape as @half.
; %A/%b/bb2 names feed the MemRef_A/MemRef_b/Stmt_bb2 identifiers in the
; CODE checks above -- keep them stable.
define void @fp128(fp128* %A, fp128 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], fadd b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds fp128, fp128* %A, i64 %i.0
%tmp3 = load fp128, fp128* %tmp, align 4
%tmp4 = fadd fp128 %tmp3, %b
store fp128 %tmp4, fp128* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(x86_fp80), cudaMemcpyHostToDevice));
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(x86_fp80), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, dev_MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(x86_fp80), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(x86_fp80 A[], x86_fp80 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; A[i] += b over 1024 'x86_fp80' (x87 extended) elements; same shape as @half.
; %A/%b/bb2 names feed the MemRef_A/MemRef_b/Stmt_bb2 identifiers in the
; CODE checks above -- keep them stable.
define void @x86_fp80(x86_fp80* %A, x86_fp80 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], fadd b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds x86_fp80, x86_fp80* %A, i64 %i.0
%tmp3 = load x86_fp80, x86_fp80* %tmp, align 4
%tmp4 = fadd x86_fp80 %tmp3, %b
store x86_fp80 %tmp4, x86_fp80* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(ppc_fp128), cudaMemcpyHostToDevice));
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_b, &MemRef_b, sizeof(ppc_fp128), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A, dev_MemRef_b);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(ppc_fp128), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(ppc_fp128 A[], ppc_fp128 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; A[i] += b over 1024 'ppc_fp128' (PowerPC double-double) elements; same shape
; as @half. %A/%b/bb2 names feed the MemRef_A/MemRef_b/Stmt_bb2 identifiers in
; the CODE checks above -- keep them stable.
define void @ppc_fp128(ppc_fp128* %A, ppc_fp128 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], fadd b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds ppc_fp128, ppc_fp128* %A, i64 %i.0
%tmp3 = load ppc_fp128, ppc_fp128* %tmp, align 4
%tmp4 = fadd ppc_fp128 %tmp3, %b
store ppc_fp128 %tmp4, ppc_fp128* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i1), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i1), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i1 A[], i1 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Integer variant: A[i] += b over 1024 'i1' elements (integer add, not fadd).
; Note the CODE checks above copy only A to the device; kernel0 takes
; dev_MemRef_A alone -- there is no dev_MemRef_b memcpy for this test.
define void @i1(i1* %A, i1 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i1, i1* %A, i64 %i.0
%tmp3 = load i1, i1* %tmp, align 4
%tmp4 = add i1 %tmp3, %b
store i1 %tmp4, i1* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i3), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i3), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i3 A[], i3 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Integer variant with a non-power-of-two width: A[i] += b over 1024 'i3'
; elements. As in the other integer tests, the CODE checks above copy only A;
; kernel0 takes dev_MemRef_A alone.
define void @i3(i3* %A, i3 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i3, i3* %A, i64 %i.0
%tmp3 = load i3, i3* %tmp, align 4
%tmp4 = add i3 %tmp3, %b
store i3 %tmp4, i3* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i8), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i8), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i8 A[], i8 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Integer variant: A[i] += b over 1024 'i8' elements. The CODE checks above
; copy only A to the device; kernel0 takes dev_MemRef_A alone.
define void @i8(i8* %A, i8 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i8, i8* %A, i64 %i.0
%tmp3 = load i8, i8* %tmp, align 4
%tmp4 = add i8 %tmp3, %b
store i8 %tmp4, i8* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i32), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i32), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i32 A[], i32 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Integer variant: A[i] += b over 1024 'i32' elements. The CODE checks above
; copy only A to the device; kernel0 takes dev_MemRef_A alone.
define void @i32(i32* %A, i32 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i32, i32* %A, i64 %i.0
%tmp3 = load i32, i32* %tmp, align 4
%tmp4 = add i32 %tmp3, %b
store i32 %tmp4, i32* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i60), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i60), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i60 A[], i60 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Integer variant with an odd 60-bit width: A[i] += b over 1024 'i60' elements.
; The CODE checks above copy only A; kernel0 takes dev_MemRef_A alone.
define void @i60(i60* %A, i60 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i60, i60* %A, i64 %i.0
%tmp3 = load i60, i60* %tmp, align 4
%tmp4 = add i60 %tmp3, %b
store i60 %tmp4, i60* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i64), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i64), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i64 A[], i64 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Integer variant: A[i] += b over 1024 'i64' elements. The CODE checks above
; copy only A to the device; kernel0 takes dev_MemRef_A alone.
define void @i64(i64* %A, i64 %b) {
bb:
br label %bb1
; Loop header: i64 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i64 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i64 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i64, i64* %A, i64 %i.0
%tmp3 = load i64, i64* %tmp, align 4
%tmp4 = add i64 %tmp3, %b
store i64 %tmp4, i64* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i64 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i80), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i80), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i80 A[], i80 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i80:64-f80:128-n8:16:32:64-S128"
; Wider-than-64-bit integer variant: A[i] += b over 1024 'i80' elements.
; Unlike the narrower tests, the induction variable itself is i80 (phi, icmp,
; GEP index, and increment all use i80). Only A is copied per the CODE checks.
define void @i80(i80* %A, i80 %b) {
bb:
br label %bb1
; Loop header: i80 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i80 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i80 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i80, i80* %A, i80 %i.0
%tmp3 = load i80, i80* %tmp, align 4
%tmp4 = add i80 %tmp3, %b
store i80 %tmp4, i80* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i80 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i120), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i120), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i120 A[], i120 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i120:64-f80:128-n8:16:32:64-S128"
; Wider-than-64-bit integer variant: A[i] += b over 1024 'i120' elements.
; The induction variable is i120 throughout (phi, icmp, GEP index, increment).
; Only A is copied per the CODE checks above.
define void @i120(i120* %A, i120 %b) {
bb:
br label %bb1
; Loop header: i120 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i120 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i120 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i120, i120* %A, i120 %i.0
%tmp3 = load i120, i120* %tmp, align 4
%tmp4 = add i120 %tmp3, %b
store i120 %tmp4, i120* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i120 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i128), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i128), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i128 A[], i128 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i128:64-f80:128-n8:16:32:64-S128"
; Wider-than-64-bit integer variant: A[i] += b over 1024 'i128' elements.
; The induction variable is i128 throughout (phi, icmp, GEP index, increment).
; Only A is copied per the CODE checks above.
define void @i128(i128* %A, i128 %b) {
bb:
br label %bb1
; Loop header: i128 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i128 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i128 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i128, i128* %A, i128 %i.0
%tmp3 = load i128, i128* %tmp, align 4
%tmp4 = add i128 %tmp3, %b
store i128 %tmp4, i128* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i128 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}
; CODE: Code
; CODE-NEXT: ====
; CODE-NEXT: # host
; CODE-NEXT: {
; CODE-NEXT: cudaCheckReturn(cudaMemcpy(dev_MemRef_A, MemRef_A, (1024) * sizeof(i3000), cudaMemcpyHostToDevice));
; CODE-NEXT: {
; CODE-NEXT: dim3 k0_dimBlock(32);
; CODE-NEXT: dim3 k0_dimGrid(32);
; CODE-NEXT: kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_MemRef_A);
; CODE-NEXT: cudaCheckKernel();
; CODE-NEXT: }
; CODE: cudaCheckReturn(cudaMemcpy(MemRef_A, dev_MemRef_A, (1024) * sizeof(i3000), cudaMemcpyDeviceToHost));
; CODE-NEXT: }
; CODE: # kernel0
; CODE-NEXT: Stmt_bb2(32 * b0 + t0);
; void foo(i3000 A[], i3000 b) {
; for (long i = 0; i < 1024; i++)
; A[i] += b;
; }
;
target datalayout = "e-m:e-i3000:64-f80:128-n8:16:32:64-S128"
; Extreme-width stress case: A[i] += b over 1024 'i3000' elements, checking
; that code generation copes with arbitrary-precision integer types. The
; induction variable is i3000 throughout. Only A is copied per the CODE checks.
define void @i3000(i3000* %A, i3000 %b) {
bb:
br label %bb1
; Loop header: i3000 induction variable 0..1023.
bb1: ; preds = %bb5, %bb
%i.0 = phi i3000 [ 0, %bb ], [ %tmp6, %bb5 ]
%exitcond = icmp ne i3000 %i.0, 1024
br i1 %exitcond, label %bb2, label %bb7
; Loop body: load A[i], add b, store back.
bb2: ; preds = %bb1
%tmp = getelementptr inbounds i3000, i3000* %A, i3000 %i.0
%tmp3 = load i3000, i3000* %tmp, align 4
%tmp4 = add i3000 %tmp3, %b
store i3000 %tmp4, i3000* %tmp, align 4
br label %bb5
; Loop latch.
bb5: ; preds = %bb2
%tmp6 = add nuw nsw i3000 %i.0, 1
br label %bb1
bb7: ; preds = %bb1
ret void
}