[NFC][X86][AArch64] Add tests for the 'check for [no] signed truncation' pattern

Summary:
[[ https://bugs.llvm.org/show_bug.cgi?id=38149 | PR38149 ]]

As discussed in https://reviews.llvm.org/D49179#1158957 and later,
the IR can be improved:
https://rise4fun.com/Alive/gBf
^ that pattern will be produced by the Implicit Integer Truncation sanitizer
(https://reviews.llvm.org/D48958,
https://bugs.llvm.org/show_bug.cgi?id=21530)
in the signed case, so it is probably a good idea to improve it.

But judging from these tests,
I think we will want to revert at least some of these cases in DAGCombine.
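
For reference, a rough sketch of the equivalent IR forms for the i16 -> i8 case,
matching what the tests below exercise (illustrative only):

  ; shl + ashr + icmp ne
  %t0 = shl i16 %x, 8
  %t1 = ashr exact i16 %t0, 8
  %r  = icmp ne i16 %t1, %x

  ; add + icmp ult
  %t0 = add i16 %x, -128    ; ~0U << (8-1)
  %r  = icmp ult i16 %t0, -256 ; ~0U << 8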

Reviewers: spatel, craig.topper, RKSimon, javed.absar

Reviewed By: spatel

Subscribers: kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D49247

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@336917 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll b/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
new file mode 100644
index 0000000..e8c6f84
--- /dev/null
+++ b/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; https://bugs.llvm.org/show_bug.cgi?id=38149
+
+; We are truncating from a wider width and then sign-extending back to the
+; original width. Then we compare the original value with the sign-extended one
+; for inequality. If they don't match, there was signed truncation.
+
+; This can be expressed in several ways in IR:
+;   trunc + sext + icmp ne <- not canonical (sketched below for reference)
+;   shl   + ashr + icmp ne
+;   add          + icmp ult
+;   add          + icmp uge
+; However, only the simplest form (with two shifts) gets lowered best.
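+;
+; For reference, a rough sketch of the non-canonical trunc + sext form for the
+; i16 -> i8 case (illustrative only, not exercised by the tests below):
+;   %t = trunc i16 %x to i8
+;   %e = sext i8 %t to i16
+;   %r = icmp ne i16 %e, %x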
+
+; ---------------------------------------------------------------------------- ;
+; shl + ashr + icmp ne
+; ---------------------------------------------------------------------------- ;
+
+define i1 @shifts_necmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: shifts_necmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, w0, uxth
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = shl i16 %x, 8 ; 16-8
+  %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
+  %tmp2 = icmp ne i16 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i32_i16(i32 %x) nounwind {
+; CHECK-LABEL: shifts_necmp_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w0
+; CHECK-NEXT:    cmp w8, w0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = shl i32 %x, 16 ; 32-16
+  %tmp1 = ashr exact i32 %tmp0, 16 ; 32-16
+  %tmp2 = icmp ne i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i32_i8(i32 %x) nounwind {
+; CHECK-LABEL: shifts_necmp_i32_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    cmp w8, w0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = shl i32 %x, 24 ; 32-8
+  %tmp1 = ashr exact i32 %tmp0, 24 ; 32-8
+  %tmp2 = icmp ne i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i64_i32(i64 %x) nounwind {
+; CHECK-LABEL: shifts_necmp_i64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    cmp x8, x0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = shl i64 %x, 32 ; 64-32
+  %tmp1 = ashr exact i64 %tmp0, 32 ; 64-32
+  %tmp2 = icmp ne i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i64_i16(i64 %x) nounwind {
+; CHECK-LABEL: shifts_necmp_i64_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    cmp x8, x0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = shl i64 %x, 48 ; 64-16
+  %tmp1 = ashr exact i64 %tmp0, 48 ; 64-16
+  %tmp2 = icmp ne i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i64_i8(i64 %x) nounwind {
+; CHECK-LABEL: shifts_necmp_i64_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    cmp x8, x0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = shl i64 %x, 56 ; 64-8
+  %tmp1 = ashr exact i64 %tmp0, 56 ; 64-8
+  %tmp2 = icmp ne i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp ult
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #128 // =128
+; CHECK-NEXT:    ubfx w8, w8, #8, #8
+; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ult i16 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #8, lsl #12 // =32768
+; CHECK-NEXT:    cmn w8, #16, lsl #12 // =65536
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp ult i32 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i32_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #128 // =128
+; CHECK-NEXT:    cmn w8, #256 // =256
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ult i32 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-2147483648
+; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    mov x9, #-4294967296
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
+  %tmp1 = icmp ult i64 %tmp0, -4294967296 ; ~0U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i64_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #8, lsl #12 // =32768
+; CHECK-NEXT:    cmn x8, #16, lsl #12 // =65536
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp ult i64 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i64_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #128 // =128
+; CHECK-NEXT:    cmn x8, #256 // =256
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ult i64 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp uge
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #8, lsl #12 // =32768
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp uge i32 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i32_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp uge i32 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x80000000
+; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    lsr x8, x8, #32
+; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
+  %tmp1 = icmp uge i64 %tmp0, 4294967296 ; 1U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i64_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, #8, lsl #12 // =32768
+; CHECK-NEXT:    lsr x8, x8, #16
+; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp uge i64 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i64_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, #128 // =128
+; CHECK-NEXT:    cmp x8, #255 // =255
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp uge i64 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
diff --git a/test/CodeGen/AArch64/signed-truncation-check.ll b/test/CodeGen/AArch64/signed-truncation-check.ll
new file mode 100644
index 0000000..ef0d6b0
--- /dev/null
+++ b/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; https://bugs.llvm.org/show_bug.cgi?id=38149
+
+; We are truncating from a wider width and then sign-extending back to the
+; original width. Then we compare the original value with the sign-extended one
+; for equality. If they don't match, there was signed truncation.
+
+; This can be expressed in several ways in IR:
+;   trunc + sext + icmp eq <- not canonical (sketched below for reference)
+;   shl   + ashr + icmp eq
+;   add          + icmp uge
+;   add          + icmp ult
+; However, only the simplest form (with two shifts) gets lowered best.
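+;
+; For reference, a rough sketch of the non-canonical trunc + sext form for the
+; i16 -> i8 case (illustrative only, not exercised by the tests below):
+;   %t = trunc i16 %x to i8
+;   %e = sext i8 %t to i16
+;   %r = icmp eq i16 %e, %x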
+
+; ---------------------------------------------------------------------------- ;
+; shl + ashr + icmp eq
+; ---------------------------------------------------------------------------- ;
+
+define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: shifts_eqcmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, w0, uxth
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = shl i16 %x, 8 ; 16-8
+  %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
+  %tmp2 = icmp eq i16 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i32_i16(i32 %x) nounwind {
+; CHECK-LABEL: shifts_eqcmp_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w0
+; CHECK-NEXT:    cmp w8, w0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = shl i32 %x, 16 ; 32-16
+  %tmp1 = ashr exact i32 %tmp0, 16 ; 32-16
+  %tmp2 = icmp eq i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i32_i8(i32 %x) nounwind {
+; CHECK-LABEL: shifts_eqcmp_i32_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    cmp w8, w0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = shl i32 %x, 24 ; 32-8
+  %tmp1 = ashr exact i32 %tmp0, 24 ; 32-8
+  %tmp2 = icmp eq i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i64_i32(i64 %x) nounwind {
+; CHECK-LABEL: shifts_eqcmp_i64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    cmp x8, x0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = shl i64 %x, 32 ; 64-32
+  %tmp1 = ashr exact i64 %tmp0, 32 ; 64-32
+  %tmp2 = icmp eq i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i64_i16(i64 %x) nounwind {
+; CHECK-LABEL: shifts_eqcmp_i64_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth x8, w0
+; CHECK-NEXT:    cmp x8, x0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = shl i64 %x, 48 ; 64-16
+  %tmp1 = ashr exact i64 %tmp0, 48 ; 64-16
+  %tmp2 = icmp eq i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
+; CHECK-LABEL: shifts_eqcmp_i64_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb x8, w0
+; CHECK-NEXT:    cmp x8, x0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = shl i64 %x, 56 ; 64-8
+  %tmp1 = ashr exact i64 %tmp0, 56 ; 64-8
+  %tmp2 = icmp eq i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp uge
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #128 // =128
+; CHECK-NEXT:    ubfx w8, w8, #8, #8
+; CHECK-NEXT:    cmp w8, #254 // =254
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #8, lsl #12 // =32768
+; CHECK-NEXT:    orr w9, wzr, #0xfffeffff
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp uge i32 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i32_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub w8, w0, #128 // =128
+; CHECK-NEXT:    cmn w8, #257 // =257
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-2147483648
+; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    orr x9, xzr, #0xfffffffeffffffff
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
+  %tmp1 = icmp uge i64 %tmp0, -4294967296 ; ~0U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i64_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #8, lsl #12 // =32768
+; CHECK-NEXT:    orr x9, xzr, #0xfffffffffffeffff
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp uge i64 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
+; CHECK-LABEL: add_ugecmp_i64_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, #128 // =128
+; CHECK-NEXT:    cmn x8, #257 // =257
+; CHECK-NEXT:    cset w0, hi
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp uge i64 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp ult
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i16_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    and w8, w8, #0xffff
+; CHECK-NEXT:    cmp w8, #256 // =256
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i32_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #8, lsl #12 // =32768
+; CHECK-NEXT:    cmp w8, #16, lsl #12 // =65536
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp ult i32 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i32_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    cmp w8, #256 // =256
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i32 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ult i32 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i64_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, wzr, #0x80000000
+; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    lsr x8, x8, #32
+; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
+  %tmp1 = icmp ult i64 %tmp0, 4294967296 ; 1U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i64_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, #8, lsl #12 // =32768
+; CHECK-NEXT:    cmp x8, #16, lsl #12 // =65536
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp ult i64 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
+; CHECK-LABEL: add_ultcmp_i64_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, #128 // =128
+; CHECK-NEXT:    cmp x8, #256 // =256
+; CHECK-NEXT:    cset w0, lo
+; CHECK-NEXT:    ret
+  %tmp0 = add i64 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ult i64 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
diff --git a/test/CodeGen/X86/lack-of-signed-truncation-check.ll b/test/CodeGen/X86/lack-of-signed-truncation-check.ll
new file mode 100644
index 0000000..7b7b177
--- /dev/null
+++ b/test/CodeGen/X86/lack-of-signed-truncation-check.ll
@@ -0,0 +1,430 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu   < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK0,X86-FALLBACK0
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK0,X64-FALLBACK0
+
+; https://bugs.llvm.org/show_bug.cgi?id=38149
+
+; We are truncating from a wider width and then sign-extending back to the
+; original width. Then we compare the original value with the sign-extended one
+; for inequality. If they don't match, there was signed truncation.
+
+; This can be expressed in several ways in IR:
+;   trunc + sext + icmp ne <- not canonical
+;   shl   + ashr + icmp ne
+;   add          + icmp ult
+;   add          + icmp uge
+; However, only the simplest form (with two shifts) gets lowered best.
+
+; ---------------------------------------------------------------------------- ;
+; shl + ashr + icmp ne
+; ---------------------------------------------------------------------------- ;
+
+define i1 @shifts_necmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: shifts_necmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_necmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpw %di, %ax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = shl i16 %x, 8 ; 16-8
+  %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
+  %tmp2 = icmp ne i16 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i32_i16(i32 %x) nounwind {
+; X86-LABEL: shifts_necmp_i32_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_necmp_i32_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movswl %di, %eax
+; X64-NEXT:    cmpl %edi, %eax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = shl i32 %x, 16 ; 32-16
+  %tmp1 = ashr exact i32 %tmp0, 16 ; 32-16
+  %tmp2 = icmp ne i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i32_i8(i32 %x) nounwind {
+; X86-LABEL: shifts_necmp_i32_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_necmp_i32_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpl %edi, %eax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = shl i32 %x, 24 ; 32-8
+  %tmp1 = ashr exact i32 %tmp0, 24 ; 32-8
+  %tmp2 = icmp ne i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i64_i32(i64 %x) nounwind {
+; X86-LABEL: shifts_necmp_i64_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_necmp_i64_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movslq %edi, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = shl i64 %x, 32 ; 64-32
+  %tmp1 = ashr exact i64 %tmp0, 32 ; 64-32
+  %tmp2 = icmp ne i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i64_i16(i64 %x) nounwind {
+; X86-LABEL: shifts_necmp_i64_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    sarl $31, %edx
+; X86-NEXT:    xorl %eax, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_necmp_i64_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movswq %di, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = shl i64 %x, 48 ; 64-16
+  %tmp1 = ashr exact i64 %tmp0, 48 ; 64-16
+  %tmp2 = icmp ne i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_necmp_i64_i8(i64 %x) nounwind {
+; X86-LABEL: shifts_necmp_i64_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    sarl $31, %edx
+; X86-NEXT:    xorl %eax, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_necmp_i64_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbq %dil, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = shl i64 %x, 56 ; 64-8
+  %tmp1 = ashr exact i64 %tmp0, 56 ; 64-8
+  %tmp2 = icmp ne i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp ult
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ultcmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $65280, %eax # imm = 0xFF00
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $65280, %eax # imm = 0xFF00
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ult i16 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
+; X86-LABEL: add_ultcmp_i32_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $-32768, %eax # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $-65536, %eax # imm = 0xFFFF0000
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i32_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-32768, %edi # imm = 0x8000
+; X64-NEXT:    cmpl $-65536, %edi # imm = 0xFFFF0000
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp ult i32 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
+; X86-LABEL: add_ultcmp_i32_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    cmpl $-256, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i32_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-128, %edi
+; X64-NEXT:    cmpl $-256, %edi
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ult i32 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
+; X86-LABEL: add_ultcmp_i64_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $-1, %eax
+; X86-NEXT:    cmpl $-1, %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i64_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $-2147483648, %rdi # imm = 0x80000000
+; X64-NEXT:    movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
+; X64-NEXT:    cmpq %rax, %rdi
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
+  %tmp1 = icmp ult i64 %tmp0, -4294967296 ; ~0U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
+; X86-LABEL: add_ultcmp_i64_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-32768, %ecx # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $-1, %eax
+; X86-NEXT:    cmpl $-65536, %ecx # imm = 0xFFFF0000
+; X86-NEXT:    sbbl $-1, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i64_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $-32768, %rdi # imm = 0x8000
+; X64-NEXT:    cmpq $-65536, %rdi # imm = 0xFFFF0000
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp ult i64 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
+; X86-LABEL: add_ultcmp_i64_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    adcl $-1, %ecx
+; X86-NEXT:    cmpl $-256, %eax
+; X86-NEXT:    sbbl $-1, %ecx
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i64_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $-128, %rdi
+; X64-NEXT:    cmpq $-256, %rdi
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp ult i64 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp uge
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ugecmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $128, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $255, %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $255, %eax
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
+; X86-LABEL: add_ugecmp_i32_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $32768, %eax # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i32_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $32768, %edi # imm = 0x8000
+; X64-NEXT:    cmpl $65535, %edi # imm = 0xFFFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp uge i32 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
+; X86-LABEL: add_ugecmp_i32_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $128, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $255, %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i32_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subl $-128, %edi
+; X64-NEXT:    cmpl $255, %edi
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp uge i32 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
+; X86-LABEL: add_ugecmp_i64_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i64_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $-2147483648, %rdi # imm = 0x80000000
+; X64-NEXT:    shrq $32, %rdi
+; X64-NEXT:    setne %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
+  %tmp1 = icmp uge i64 %tmp0, 4294967296 ; 1U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
+; X86-LABEL: add_ugecmp_i64_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $32768, %ecx # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl $65535, %esi # imm = 0xFFFF
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    setb %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i64_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $32768, %rdi # imm = 0x8000
+; X64-NEXT:    cmpq $65535, %rdi # imm = 0xFFFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp uge i64 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
+; X86-LABEL: add_ugecmp_i64_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $128, %ecx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl $255, %esi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    setb %al
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i64_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $-128, %rdi
+; X64-NEXT:    cmpq $255, %rdi
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp uge i64 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
diff --git a/test/CodeGen/X86/signed-truncation-check.ll b/test/CodeGen/X86/signed-truncation-check.ll
new file mode 100644
index 0000000..c5de304
--- /dev/null
+++ b/test/CodeGen/X86/signed-truncation-check.ll
@@ -0,0 +1,426 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-linux-gnu   < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI2,X86-NOBMI2,FALLBACK0,X86-FALLBACK0
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI2,X64-NOBMI2,FALLBACK0,X64-FALLBACK0
+
+; https://bugs.llvm.org/show_bug.cgi?id=38149
+
+; We are truncating from a wider width and then sign-extending back to the
+; original width. Then we compare the original value with the sign-extended one
+; for equality. If they don't match, there was signed truncation.
+
+; This can be expressed in several ways in IR:
+;   trunc + sext + icmp eq <- not canonical
+;   shl   + ashr + icmp eq
+;   add          + icmp uge
+;   add          + icmp ult
+; However, only the simplest form (with two shifts) gets lowered best.
+
+; ---------------------------------------------------------------------------- ;
+; shl + ashr + icmp eq
+; ---------------------------------------------------------------------------- ;
+
+define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: shifts_eqcmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_eqcmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpw %di, %ax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = shl i16 %x, 8 ; 16-8
+  %tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
+  %tmp2 = icmp eq i16 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i32_i16(i32 %x) nounwind {
+; X86-LABEL: shifts_eqcmp_i32_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_eqcmp_i32_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movswl %di, %eax
+; X64-NEXT:    cmpl %edi, %eax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = shl i32 %x, 16 ; 32-16
+  %tmp1 = ashr exact i32 %tmp0, 16 ; 32-16
+  %tmp2 = icmp eq i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i32_i8(i32 %x) nounwind {
+; X86-LABEL: shifts_eqcmp_i32_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_eqcmp_i32_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbl %dil, %eax
+; X64-NEXT:    cmpl %edi, %eax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = shl i32 %x, 24 ; 32-8
+  %tmp1 = ashr exact i32 %tmp0, 24 ; 32-8
+  %tmp2 = icmp eq i32 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i64_i32(i64 %x) nounwind {
+; X86-LABEL: shifts_eqcmp_i64_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_eqcmp_i64_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movslq %edi, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = shl i64 %x, 32 ; 64-32
+  %tmp1 = ashr exact i64 %tmp0, 32 ; 64-32
+  %tmp2 = icmp eq i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i64_i16(i64 %x) nounwind {
+; X86-LABEL: shifts_eqcmp_i64_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movswl %ax, %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    sarl $31, %edx
+; X86-NEXT:    xorl %eax, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_eqcmp_i64_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movswq %di, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = shl i64 %x, 48 ; 64-16
+  %tmp1 = ashr exact i64 %tmp0, 48 ; 64-16
+  %tmp2 = icmp eq i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
+; X86-LABEL: shifts_eqcmp_i64_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl %al, %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    sarl $31, %edx
+; X86-NEXT:    xorl %eax, %ecx
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: shifts_eqcmp_i64_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movsbq %dil, %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = shl i64 %x, 56 ; 64-8
+  %tmp1 = ashr exact i64 %tmp0, 56 ; 64-8
+  %tmp2 = icmp eq i64 %tmp1, %x
+  ret i1 %tmp2
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp uge
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ugecmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $65279, %eax # imm = 0xFEFF
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $65279, %eax # imm = 0xFEFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
+; X86-LABEL: add_ugecmp_i32_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $-32768, %eax # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $-65537, %eax # imm = 0xFFFEFFFF
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i32_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-32768, %edi # imm = 0x8000
+; X64-NEXT:    cmpl $-65537, %edi # imm = 0xFFFEFFFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp uge i32 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
+; X86-LABEL: add_ugecmp_i32_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    cmpl $-257, %eax # imm = 0xFEFF
+; X86-NEXT:    seta %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i32_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $-128, %edi
+; X64-NEXT:    cmpl $-257, %edi # imm = 0xFEFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
+; X86-LABEL: add_ugecmp_i64_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $-1, %eax
+; X86-NEXT:    cmpl $-1, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i64_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $-2147483648, %rdi # imm = 0x80000000
+; X64-NEXT:    movabsq $-4294967297, %rax # imm = 0xFFFFFFFEFFFFFFFF
+; X64-NEXT:    cmpq %rax, %rdi
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
+  %tmp1 = icmp uge i64 %tmp0, -4294967296 ; ~0U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
+; X86-LABEL: add_ugecmp_i64_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-32768, %ecx # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $-1, %eax
+; X86-NEXT:    movl $-65537, %edx # imm = 0xFFFEFFFF
+; X86-NEXT:    cmpl %ecx, %edx
+; X86-NEXT:    movl $-1, %ecx
+; X86-NEXT:    sbbl %eax, %ecx
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i64_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $-32768, %rdi # imm = 0x8000
+; X64-NEXT:    cmpq $-65537, %rdi # imm = 0xFFFEFFFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
+  %tmp1 = icmp uge i64 %tmp0, -65536 ; ~0U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
+; X86-LABEL: add_ugecmp_i64_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    addl $-128, %eax
+; X86-NEXT:    adcl $-1, %ecx
+; X86-NEXT:    movl $-257, %edx # imm = 0xFEFF
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    sbbl %ecx, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ugecmp_i64_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $-128, %rdi
+; X64-NEXT:    cmpq $-257, %rdi # imm = 0xFEFF
+; X64-NEXT:    seta %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, -128 ; ~0U << (8-1)
+  %tmp1 = icmp uge i64 %tmp0, -256 ; ~0U << 8
+  ret i1 %tmp1
+}
+
+; ---------------------------------------------------------------------------- ;
+; add + icmp ult
+; ---------------------------------------------------------------------------- ;
+
+define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
+; X86-LABEL: add_ultcmp_i16_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $128, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    cmpl $256, %eax # imm = 0x100
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i16_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subl $-128, %edi
+; X64-NEXT:    movzwl %di, %eax
+; X64-NEXT:    cmpl $256, %eax # imm = 0x100
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i16 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
+; X86-LABEL: add_ultcmp_i32_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $32768, %eax # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $65536, %eax # imm = 0x10000
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i32_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addl $32768, %edi # imm = 0x8000
+; X64-NEXT:    cmpl $65536, %edi # imm = 0x10000
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp ult i32 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
+; X86-LABEL: add_ultcmp_i32_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $128, %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl $256, %eax # imm = 0x100
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i32_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subl $-128, %edi
+; X64-NEXT:    cmpl $256, %edi # imm = 0x100
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i32 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ult i32 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
+; X86-LABEL: add_ultcmp_i64_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i64_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $-2147483648, %rdi # imm = 0x80000000
+; X64-NEXT:    shrq $32, %rdi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
+  %tmp1 = icmp ult i64 %tmp0, 4294967296 ; 1U << 32
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
+; X86-LABEL: add_ultcmp_i64_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $32768, %ecx # imm = 0x8000
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    cmpl $65536, %ecx # imm = 0x10000
+; X86-NEXT:    sbbl $0, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i64_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    addq $32768, %rdi # imm = 0x8000
+; X64-NEXT:    cmpq $65536, %rdi # imm = 0x10000
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, 32768 ; 1U << (16-1)
+  %tmp1 = icmp ult i64 %tmp0, 65536 ; 1U << 16
+  ret i1 %tmp1
+}
+
+define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
+; X86-LABEL: add_ultcmp_i64_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $128, %ecx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    cmpl $256, %ecx # imm = 0x100
+; X86-NEXT:    sbbl $0, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: add_ultcmp_i64_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    subq $-128, %rdi
+; X64-NEXT:    cmpq $256, %rdi # imm = 0x100
+; X64-NEXT:    setb %al
+; X64-NEXT:    retq
+  %tmp0 = add i64 %x, 128 ; 1U << (8-1)
+  %tmp1 = icmp ult i64 %tmp0, 256 ; 1U << 8
+  ret i1 %tmp1
+}