blob: 140e49d262de3fcccbc04118af5ca73b8c24b65a [file] [log] [blame]
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Verify that calls to strtoul and strtoull are interpreted correctly even
; in corner cases (or not folded).
;
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare i32 @strtoul(i8*, i8**, i32)
declare i64 @strtoull(i8*, i8**, i32)
; All POSIX whitespace characters.
@ws = constant [7 x i8] c"\09\0d\0a\0b\0c \00"
; A negative and positive number preceded by all POSIX whitespace.
@ws_im123 = constant [11 x i8] c"\09\0d\0a\0b\0c -123\00"
@ws_ip234 = constant [11 x i8] c"\09\0d\0a\0b\0c +234\00"
@i32min = constant [13 x i8] c" -2147483648\00"
@i32min_m1 = constant [13 x i8] c" -2147483649\00"
@o32min = constant [15 x i8] c" +020000000000\00"
@mo32min = constant [15 x i8] c" -020000000000\00"
@x32min = constant [13 x i8] c" +0x80000000\00"
; Negated hex form of INT32_MIN ("m" = minus).  This was previously a
; copy&paste duplicate of @x32min with a '+' sign; the minus is what the
; "Fold -INT32_MIN in hex" test below is meant to exercise.  The folded
; result (and the endptr offset) is the same either way, so the CHECK
; lines are unaffected.
@mx32min = constant [13 x i8] c" -0x80000000\00"
@i32max = constant [12 x i8] c" 2147483647\00"
@i32max_p1 = constant [12 x i8] c" 2147483648\00"
@mX01 = constant [6 x i8] c" -0X1\00"
@ui32max = constant [12 x i8] c" 4294967295\00"
@ui32max_p1 = constant [12 x i8] c" 4294967296\00"
@i64min = constant [22 x i8] c" -9223372036854775808\00"
@i64min_m1 = constant [22 x i8] c" -9223372036854775809\00"
@i64max = constant [21 x i8] c" 9223372036854775807\00"
@i64max_p1 = constant [21 x i8] c" 9223372036854775808\00"
@ui64max = constant [22 x i8] c" 18446744073709551615\00"
@x64max = constant [20 x i8] c" 0xffffffffffffffff\00"
@ui64max_p1 = constant [22 x i8] c" 18446744073709551616\00"
@endptr = external global i8*
; Exercise folding calls to 32-bit strtoul.
define void @fold_strtoul(i32* %ps) {
; CHECK-LABEL: @fold_strtoul(
; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_im123, i64 0, i64 10), i8** @endptr, align 8
; CHECK-NEXT: store i32 -123, i32* [[PS:%.*]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_ip234, i64 0, i64 10), i8** @endptr, align 8
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1
; CHECK-NEXT: store i32 234, i32* [[PS1]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min_m1, i64 0, i64 12), i8** @endptr, align 8
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2
; CHECK-NEXT: store i32 2147483647, i32* [[PS2]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min, i64 0, i64 12), i8** @endptr, align 8
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3
; CHECK-NEXT: store i32 -2147483648, i32* [[PS3]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 14), i8** @endptr, align 8
; CHECK-NEXT: [[PS4:%.*]] = getelementptr i32, i32* [[PS]], i64 4
; CHECK-NEXT: store i32 -2147483648, i32* [[PS4]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @mo32min, i64 0, i64 14), i8** @endptr, align 8
; CHECK-NEXT: [[PS5:%.*]] = getelementptr i32, i32* [[PS]], i64 5
; CHECK-NEXT: store i32 -2147483648, i32* [[PS5]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 12), i8** @endptr, align 8
; CHECK-NEXT: [[PS6:%.*]] = getelementptr i32, i32* [[PS]], i64 6
; CHECK-NEXT: store i32 -2147483648, i32* [[PS6]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @mx32min, i64 0, i64 12), i8** @endptr, align 8
; CHECK-NEXT: [[PS7:%.*]] = getelementptr i32, i32* [[PS]], i64 7
; CHECK-NEXT: store i32 -2147483648, i32* [[PS7]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @i32max, i64 0, i64 11), i8** @endptr, align 8
; CHECK-NEXT: [[PS8:%.*]] = getelementptr i32, i32* [[PS]], i64 8
; CHECK-NEXT: store i32 2147483647, i32* [[PS8]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @mX01, i64 0, i64 5), i8** @endptr, align 8
; CHECK-NEXT: [[PS9:%.*]] = getelementptr i32, i32* [[PS]], i64 9
; CHECK-NEXT: store i32 -1, i32* [[PS9]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @i32max_p1, i64 0, i64 11), i8** @endptr, align 8
; CHECK-NEXT: [[PS10:%.*]] = getelementptr i32, i32* [[PS]], i64 10
; CHECK-NEXT: store i32 -2147483648, i32* [[PS10]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([12 x i8], [12 x i8]* @ui32max, i64 0, i64 11), i8** @endptr, align 8
; CHECK-NEXT: [[PS11:%.*]] = getelementptr i32, i32* [[PS]], i64 11
; CHECK-NEXT: store i32 -1, i32* [[PS11]], align 4
; CHECK-NEXT: ret void
;
; Fold a valid sequence with leading POSIX whitespace and a minus to
; (uint32_t)-123.
  %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0
  %im123 = call i32 @strtoul(i8* %pwsm123, i8** @endptr, i32 10)
  %ps0 = getelementptr i32, i32* %ps, i32 0
  store i32 %im123, i32* %ps0
; Fold a valid sequence with leading POSIX whitespace and a plus to +234.
  %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0
  %ip234 = call i32 @strtoul(i8* %pwsp234, i8** @endptr, i32 10)
  %ps1 = getelementptr i32, i32* %ps, i32 1
  store i32 %ip234, i32* %ps1
; Fold the result of conversion that's equal to INT32_MIN - 1.
  %psi32minm1 = getelementptr [13 x i8], [13 x i8]* @i32min_m1, i32 0, i32 0
  %i32min32m1 = call i32 @strtoul(i8* %psi32minm1, i8** @endptr, i32 10)
  %ps2 = getelementptr i32, i32* %ps, i32 2
  store i32 %i32min32m1, i32* %ps2
; Fold INT32_MIN.
  %psi32min = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0
  %i32min = call i32 @strtoul(i8* %psi32min, i8** @endptr, i32 10)
  %ps3 = getelementptr i32, i32* %ps, i32 3
  store i32 %i32min, i32* %ps3
; Fold INT32_MIN in octal.
  %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0
  %o32min = call i32 @strtoul(i8* %pso32min, i8** @endptr, i32 0)
  %ps4 = getelementptr i32, i32* %ps, i32 4
  store i32 %o32min, i32* %ps4
; Fold -INT32_MIN in octal.
  %psmo32min = getelementptr [15 x i8], [15 x i8]* @mo32min, i32 0, i32 0
  %mo32min = call i32 @strtoul(i8* %psmo32min, i8** @endptr, i32 0)
  %ps5 = getelementptr i32, i32* %ps, i32 5
  store i32 %mo32min, i32* %ps5
; Fold INT32_MIN in hex.
  %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0
  %x32min = call i32 @strtoul(i8* %psx32min, i8** @endptr, i32 0)
  %ps6 = getelementptr i32, i32* %ps, i32 6
  store i32 %x32min, i32* %ps6
; Fold -INT32_MIN in hex.  Store the %mx32min result (not %x32min, which a
; copy&paste mistake previously stored, leaving %mx32min dead and this case
; untested).  The folded constant is the same, so the CHECK above still holds.
  %psmx32min = getelementptr [13 x i8], [13 x i8]* @mx32min, i32 0, i32 0
  %mx32min = call i32 @strtoul(i8* %psmx32min, i8** @endptr, i32 0)
  %ps7 = getelementptr i32, i32* %ps, i32 7
  store i32 %mx32min, i32* %ps7
; Fold INT32_MAX.
  %psi32max = getelementptr [12 x i8], [12 x i8]* @i32max, i32 0, i32 0
  %i32max = call i32 @strtoul(i8* %psi32max, i8** @endptr, i32 10)
  %ps8 = getelementptr i32, i32* %ps, i32 8
  store i32 %i32max, i32* %ps8
; Fold -0x01.
  %psmX01 = getelementptr [6 x i8], [6 x i8]* @mX01, i32 0, i32 0
  %mX01 = call i32 @strtoul(i8* %psmX01, i8** @endptr, i32 0)
  %ps9 = getelementptr i32, i32* %ps, i32 9
  store i32 %mX01, i32* %ps9
; Fold the result of conversion that's equal to INT32_MAX + 1.
  %psmax32p1 = getelementptr [12 x i8], [12 x i8]* @i32max_p1, i32 0, i32 0
  %i32max32p1 = call i32 @strtoul(i8* %psmax32p1, i8** @endptr, i32 10)
  %ps10 = getelementptr i32, i32* %ps, i32 10
  store i32 %i32max32p1, i32* %ps10
; Fold UINT32_MAX.
  %psmax = getelementptr [12 x i8], [12 x i8]* @ui32max, i32 0, i32 0
  %ui32max = call i32 @strtoul(i8* %psmax, i8** @endptr, i32 10)
  %ps11 = getelementptr i32, i32* %ps, i32 11
  store i32 %ui32max, i32* %ps11
  ret void
}
; Exercise not folding calls to 32-bit strtoul.
define void @call_strtoul(i32* %ps) {
; CHECK-LABEL: @call_strtoul(
; CHECK-NEXT: [[MINM1:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 0), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: store i32 [[MINM1]], i32* [[PS:%.*]], align 4
; CHECK-NEXT: [[MAXP1:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @ui32max_p1, i64 0, i64 0), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i32, i32* [[PS]], i64 1
; CHECK-NEXT: store i32 [[MAXP1]], i32* [[PS1]], align 4
; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i32, i32* [[PS]], i64 2
; CHECK-NEXT: store i32 [[NWS]], i32* [[PS2]], align 4
; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtoul(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i32, i32* [[PS]], i64 3
; CHECK-NEXT: store i32 [[NWSP6]], i32* [[PS3]], align 4
; CHECK-NEXT: ret void
;
; Do not fold the result of conversion that overflows uint32_t.  This
; could be folded into a constant provided errno were set to ERANGE.
  %psminm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0
  %minm1 = call i32 @strtoul(i8* %psminm1, i8** @endptr, i32 10)
  %ps0 = getelementptr i32, i32* %ps, i32 0
  store i32 %minm1, i32* %ps0
; Do not fold the result of conversion that's greater than UINT32_MAX
; (same logic as above applies here).
  %psui32maxp1 = getelementptr [12 x i8], [12 x i8]* @ui32max_p1, i32 0, i32 0
  %maxp1 = call i32 @strtoul(i8* %psui32maxp1, i8** @endptr, i32 10)
  %ps1 = getelementptr i32, i32* %ps, i32 1
  store i32 %maxp1, i32* %ps1
; Do not fold a sequence consisting of just whitespace characters.
  %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0
  %nws = call i32 @strtoul(i8* %psws, i8** @endptr, i32 10)
  %ps2 = getelementptr i32, i32* %ps, i32 2
  store i32 %nws, i32* %ps2
; Do not fold an empty sequence (a pointer just past the whitespace, at the
; nul terminator).  The library call may or may not end up storing EINVAL
; in errno.
  %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6
  %nwsp6 = call i32 @strtoul(i8* %pswsp6, i8** @endptr, i32 10)
  %ps3 = getelementptr i32, i32* %ps, i32 3
  store i32 %nwsp6, i32* %ps3
  ret void
}
; Exercise folding calls to 64-bit strtoull.
define void @fold_strtoull(i64* %ps) {
; CHECK-LABEL: @fold_strtoull(
; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_im123, i64 0, i64 10), i8** @endptr, align 8
; CHECK-NEXT: store i64 -123, i64* [[PS:%.*]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @ws_ip234, i64 0, i64 10), i8** @endptr, align 8
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS]], i64 1
; CHECK-NEXT: store i64 234, i64* [[PS1]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min_m1, i64 0, i64 21), i8** @endptr, align 8
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2
; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS2]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @i32min, i64 0, i64 12), i8** @endptr, align 8
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3
; CHECK-NEXT: store i64 -2147483648, i64* [[PS3]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @o32min, i64 0, i64 14), i8** @endptr, align 8
; CHECK-NEXT: [[PS4:%.*]] = getelementptr i64, i64* [[PS]], i64 4
; CHECK-NEXT: store i64 2147483648, i64* [[PS4]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([13 x i8], [13 x i8]* @x32min, i64 0, i64 12), i8** @endptr, align 8
; CHECK-NEXT: [[PS5:%.*]] = getelementptr i64, i64* [[PS]], i64 5
; CHECK-NEXT: store i64 2147483648, i64* [[PS5]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @i64min, i64 0, i64 21), i8** @endptr, align 8
; CHECK-NEXT: [[PS6:%.*]] = getelementptr i64, i64* [[PS]], i64 6
; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS6]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([21 x i8], [21 x i8]* @i64max, i64 0, i64 20), i8** @endptr, align 8
; CHECK-NEXT: [[PS7:%.*]] = getelementptr i64, i64* [[PS]], i64 7
; CHECK-NEXT: store i64 9223372036854775807, i64* [[PS7]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([21 x i8], [21 x i8]* @i64max_p1, i64 0, i64 20), i8** @endptr, align 8
; CHECK-NEXT: [[PS8:%.*]] = getelementptr i64, i64* [[PS]], i64 8
; CHECK-NEXT: store i64 -9223372036854775808, i64* [[PS8]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([22 x i8], [22 x i8]* @ui64max, i64 0, i64 21), i8** @endptr, align 8
; CHECK-NEXT: [[PS9:%.*]] = getelementptr i64, i64* [[PS]], i64 9
; CHECK-NEXT: store i64 -1, i64* [[PS9]], align 4
; CHECK-NEXT: store i8* getelementptr inbounds ([20 x i8], [20 x i8]* @x64max, i64 0, i64 19), i8** @endptr, align 8
; CHECK-NEXT: [[PS10:%.*]] = getelementptr i64, i64* [[PS]], i64 10
; CHECK-NEXT: store i64 -1, i64* [[PS10]], align 4
; CHECK-NEXT: ret void
;
; Fold a valid sequence with leading POSIX whitespace and a minus to
; (uint64_t)-123.
  %pwsm123 = getelementptr [11 x i8], [11 x i8]* @ws_im123, i32 0, i32 0
  %im123 = call i64 @strtoull(i8* %pwsm123, i8** @endptr, i32 10)
  %ps0 = getelementptr i64, i64* %ps, i32 0
  store i64 %im123, i64* %ps0
; Fold a valid sequence with leading POSIX whitespace and a plus to +234.
  %pwsp234 = getelementptr [11 x i8], [11 x i8]* @ws_ip234, i32 0, i32 0
  %ip234 = call i64 @strtoull(i8* %pwsp234, i8** @endptr, i32 10)
  %ps1 = getelementptr i64, i64* %ps, i32 1
  store i64 %ip234, i64* %ps1
; Fold the result of conversion that's equal to INT64_MIN - 1.  Negating
; the unsigned value wraps it around to INT64_MAX.
  %psi64minm1 = getelementptr [22 x i8], [22 x i8]* @i64min_m1, i32 0, i32 0
  %i64min32m1 = call i64 @strtoull(i8* %psi64minm1, i8** @endptr, i32 10)
  %ps2 = getelementptr i64, i64* %ps, i32 2
  store i64 %i64min32m1, i64* %ps2
; Fold INT32_MIN.
  %psi32min = getelementptr [13 x i8], [13 x i8]* @i32min, i32 0, i32 0
  %i32min = call i64 @strtoull(i8* %psi32min, i8** @endptr, i32 10)
  %ps3 = getelementptr i64, i64* %ps, i32 3
  store i64 %i32min, i64* %ps3
; Fold INT32_MIN in octal (base 0 detects the leading 0 as an octal prefix).
  %pso32min = getelementptr [15 x i8], [15 x i8]* @o32min, i32 0, i32 0
  %o32min = call i64 @strtoull(i8* %pso32min, i8** @endptr, i32 0)
  %ps4 = getelementptr i64, i64* %ps, i32 4
  store i64 %o32min, i64* %ps4
; Fold INT32_MIN in hex (base 0 detects the leading 0x as a hex prefix).
  %psx32min = getelementptr [13 x i8], [13 x i8]* @x32min, i32 0, i32 0
  %x32min = call i64 @strtoull(i8* %psx32min, i8** @endptr, i32 0)
  %ps5 = getelementptr i64, i64* %ps, i32 5
  store i64 %x32min, i64* %ps5
; Fold INT64_MIN.
  %psi64min = getelementptr [22 x i8], [22 x i8]* @i64min, i32 0, i32 0
  %i64min = call i64 @strtoull(i8* %psi64min, i8** @endptr, i32 10)
  %ps6 = getelementptr i64, i64* %ps, i32 6
  store i64 %i64min, i64* %ps6
; Fold INT64_MAX.
  %psi64max = getelementptr [21 x i8], [21 x i8]* @i64max, i32 0, i32 0
  %i64max = call i64 @strtoull(i8* %psi64max, i8** @endptr, i32 10)
  %ps7 = getelementptr i64, i64* %ps, i32 7
  store i64 %i64max, i64* %ps7
; Fold the result of conversion that's equal to INT64_MAX + 1 to INT64_MIN.
  %psmax32p1 = getelementptr [21 x i8], [21 x i8]* @i64max_p1, i32 0, i32 0
  %i64max32p1 = call i64 @strtoull(i8* %psmax32p1, i8** @endptr, i32 10)
  %ps8 = getelementptr i64, i64* %ps, i32 8
  store i64 %i64max32p1, i64* %ps8
; Fold UINT64_MAX (stored as the all-ones bit pattern, i.e. i64 -1).
  %psmax = getelementptr [22 x i8], [22 x i8]* @ui64max, i32 0, i32 0
  %ui64max = call i64 @strtoull(i8* %psmax, i8** @endptr, i32 10)
  %ps9 = getelementptr i64, i64* %ps, i32 9
  store i64 %ui64max, i64* %ps9
; Fold UINT64_MAX in hex.
  %psxmax = getelementptr [20 x i8], [20 x i8]* @x64max, i32 0, i32 0
  %x64max = call i64 @strtoull(i8* %psxmax, i8** @endptr, i32 0)
  %ps10 = getelementptr i64, i64* %ps, i32 10
  store i64 %x64max, i64* %ps10
  ret void
}
; Exercise not folding calls to 64-bit strtoull.
define void @call_strtoull(i64* %ps) {
; CHECK-LABEL: @call_strtoull(
; CHECK-NEXT: [[MAXP1:%.*]] = call i64 @strtoull(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @ui64max_p1, i64 0, i64 0), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i64, i64* [[PS:%.*]], i64 1
; CHECK-NEXT: store i64 [[MAXP1]], i64* [[PS1]], align 4
; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoull(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 0), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i64, i64* [[PS]], i64 2
; CHECK-NEXT: store i64 [[NWS]], i64* [[PS2]], align 4
; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoull(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @ws, i64 0, i64 6), i8** nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i64, i64* [[PS]], i64 3
; CHECK-NEXT: store i64 [[NWSP6]], i64* [[PS3]], align 4
; CHECK-NEXT: ret void
;
; Do not fold the result of conversion that overflows uint64_t.  This
; could be folded into a constant provided errno were set to ERANGE.
  %psui64maxp1 = getelementptr [22 x i8], [22 x i8]* @ui64max_p1, i32 0, i32 0
  %maxp1 = call i64 @strtoull(i8* %psui64maxp1, i8** @endptr, i32 10)
  %ps1 = getelementptr i64, i64* %ps, i32 1
  store i64 %maxp1, i64* %ps1
; Do not fold a sequence consisting of just whitespace characters.
  %psws = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 0
  %nws = call i64 @strtoull(i8* %psws, i8** @endptr, i32 10)
  %ps2 = getelementptr i64, i64* %ps, i32 2
  store i64 %nws, i64* %ps2
; Do not fold an empty sequence (a pointer just past the whitespace, at the
; nul terminator).  The library call may or may not end up storing EINVAL
; in errno.
  %pswsp6 = getelementptr [7 x i8], [7 x i8]* @ws, i32 0, i32 6
  %nwsp6 = call i64 @strtoull(i8* %pswsp6, i8** @endptr, i32 10)
  %ps3 = getelementptr i64, i64* %ps, i32 3
  store i64 %nwsp6, i64* %ps3
  ret void
}