; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes=infer-alignment -S < %s | FileCheck %s
; ------------------------------------------------------------------------------
; Array of pair
; ------------------------------------------------------------------------------
; Check that we improve the alignment information.
; The base pointer is 16-byte aligned and we access the i32 field at offset 8
; within each element.
; Every element of @array.simple is 16-byte aligned, so the access through the
; gep below is 8-byte aligned.
%pair.simple = type { ptr, i32 }
@array.simple = global [4 x %pair.simple] zeroinitializer, align 16
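; Offset arithmetic (this test has no explicit datalayout; assuming the default
; 64-bit layout): %pair.simple is 16 bytes (ptr at offset 0, i32 at offset 8,
; padded to 16), so the gep below computes @array.simple + 16 * %idx + 8.
; Starting from a 16-byte aligned base, that address is provably 8-byte
; aligned, which matches the align 8 in the CHECK lines.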
define void @simple_pair(i64 %idx) {
; CHECK-LABEL: define void @simple_pair
; CHECK-SAME: (i64 [[IDX:%.*]]) {
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x %pair.simple], ptr @array.simple, i64 0, i64 [[IDX]], i32 1
; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[GEP]], align 8
; CHECK-NEXT: store i32 0, ptr [[GEP]], align 8
; CHECK-NEXT: ret void
;
%gep = getelementptr inbounds [4 x %pair.simple], ptr @array.simple, i64 0, i64 %idx, i32 1
%res = load i32, ptr %gep, align 1
store i32 0, ptr %gep, align 1
ret void
}
; ------------------------------------------------------------------------------
; Array of pair of arrays
; ------------------------------------------------------------------------------
%pair.array = type { [3 x i32], [3 x i32] }
@array.array = internal global [3 x %pair.array] zeroinitializer
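; @array.array has no explicit alignment; it is internal, so the pass is free
; to assume (and raise) its alignment, and the offset-0 access below shows a
; 16-byte aligned base. Each constant gep addresses offset 4 * N for
; N = 0..4, i.e. 0, 4, 8, 12 and 16 bytes from the base, so the largest
; provable alignments are 16, 4, 8, 4 and 16 respectively.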
define void @load_nested() {
; CHECK-LABEL: define void @load_nested() {
; CHECK-NEXT: [[X_0:%.*]] = load i32, ptr @array.array, align 16
; CHECK-NEXT: [[X_1:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
; CHECK-NEXT: [[X_2:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 8
; CHECK-NEXT: [[X_3:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
; CHECK-NEXT: [[X_4:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 16
; CHECK-NEXT: ret void
;
%x.0 = load i32, ptr @array.array, align 4
%x.1 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
%x.2 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4
%x.3 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
%x.4 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4
ret void
}
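; Same layout as @load_nested: the stores below target offsets 0, 4, 8, 12 and
; 16, so they are refined to align 16, 4, 8, 4 and 16 as well.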
define void @store_nested() {
; CHECK-LABEL: define void @store_nested() {
; CHECK-NEXT: store i32 1, ptr @array.array, align 16
; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 8
; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 16
; CHECK-NEXT: ret void
;
store i32 1, ptr @array.array, align 4
store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4
store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4
ret void
}