[NFC][InstCombine] Tests for "fold variable mask before variable shift-of-trunc" (PR42563)

https://bugs.llvm.org/show_bug.cgi?id=42563

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@375135 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll
new file mode 100644
index 0000000..bfe563d
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-a.ll
@@ -0,0 +1,273 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a
+; single shift+mask.
+
+; There are many variants of this pattern:
+;   a)  (trunc (x & ((1 << maskNbits) - 1))) << shiftNbits
+; which simplifies to:
+;   ((trunc(x)) << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
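+
+; For example (constants purely illustrative, not tested below): take
+; i64 %x = 0x123456789A, maskNbits = 8, shiftNbits = 16. Then
+;   original:   trunc(%x & 0xFF) << 16          = 0x9A << 16 = 0x009A0000
+;   simplified: (trunc(%x) << 16) & ~(-1 << 24) = 0x789A0000 & 0x00FFFFFF
+;                                               = 0x009A0000
+; i.e. both forms leave exactly the same bits set.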
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 1, %t1 ; shifting by nbits-1
+  %t3 = add i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  %t6 = trunc i64 %t5 to i32
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, %t1 ; shifting by nbits-1
+  %t3 = add <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+
+  %t5 = and <8 x i64> %t3, %x
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 undef, i64 1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 undef, i64 1>, %t1 ; shifting by nbits-1
+  %t3 = add <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, %nbits
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+
+  %t5 = and <8 x i64> %t3, %x
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, %t1 ; shifting by nbits-1
+  %t3 = add <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+
+  %t5 = and <8 x i64> %t3, %x
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+; Extra uses. Negative tests (n-prefixed): the fold is not expected to fire
+; when intermediate values have extra uses, so the IR should stay unchanged.
+
+define i32 @n4_extrause0(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause0(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 1, %t1 ; shifting by nbits-1
+  %t3 = add i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  call void @use64(i64 %t5)
+  %t6 = trunc i64 %t5 to i32
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+define i32 @n5_extrause1(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_extrause1(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T6]])
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 1, %t1 ; shifting by nbits-1
+  %t3 = add i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  %t6 = trunc i64 %t5 to i32
+  call void @use32(i32 %t6)
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+define i32 @n6_extrause2(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n6_extrause2(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = add i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T6]])
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 1, %t1 ; shifting by nbits-1
+  %t3 = add i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  call void @use64(i64 %t5)
+  %t6 = trunc i64 %t5 to i32
+  call void @use32(i32 %t6)
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
new file mode 100644
index 0000000..b1d0aa0
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
@@ -0,0 +1,310 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a
+; single shift+mask.
+
+; There are many variants of this pattern:
+;   b)  (trunc ((x & (~(-1 << maskNbits))))) << shiftNbits
+; which simplifies to:
+;   ((trunc(x)) << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
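+
+; For example (constants purely illustrative, not tested below): take
+; i64 %x = 0x123456789A, maskNbits = 8, shiftNbits = 16. The mask is
+; ~(-1 << 8) = 0xFF, so
+;   original:   trunc(%x & 0xFF) << 16          = 0x009A0000
+;   simplified: (trunc(%x) << 16) & ~(-1 << 24) = 0x789A0000 & 0x00FFFFFF
+;                                               = 0x009A0000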
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 -1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 -1, %t1 ; shifting by nbits-1
+  %t3 = xor i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  %t6 = trunc i64 %t5 to i32
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t1 ; shifting by nbits-1
+  %t3 = xor <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+
+  %t5 = and <8 x i64> %t3, %x
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t1 ; shifting by nbits-1
+  %t3 = xor <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, %nbits
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+
+  %t5 = and <8 x i64> %t3, %x
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t1 ; shifting by nbits-1
+  %t3 = xor <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+
+  %t5 = and <8 x i64> %t3, %x
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+; The all-ones (-1) xor operand can itself be truncated: xor'ing with
+; 4294967295 (0xFFFFFFFF) still inverts every bit that survives the trunc to i32.
+
+define i32 @t4_allones_trunc(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t4_allones_trunc(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 -1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor i64 [[T2]], 4294967295
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 -1, %t1 ; shifting by nbits-1
+  %t3 = xor i64 %t2, 4294967295 ; we only care about low 32 bits
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  %t6 = trunc i64 %t5 to i32
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+
+; Extra uses. Negative tests (n-prefixed): the fold is not expected to fire
+; when intermediate values have extra uses, so the IR should stay unchanged.
+
+define i32 @n5_extrause0(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_extrause0(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 -1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 -1, %t1 ; shifting by nbits-1
+  %t3 = xor i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  call void @use64(i64 %t5)
+  %t6 = trunc i64 %t5 to i32
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+define i32 @n6_extrause1(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n6_extrause1(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 -1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T6]])
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 -1, %t1 ; shifting by nbits-1
+  %t3 = xor i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  %t6 = trunc i64 %t5 to i32
+  call void @use32(i32 %t6)
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
+define i32 @n7_extrause2(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n7_extrause2(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = shl i64 -1, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor i64 [[T2]], -1
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = trunc i64 [[T5]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T6]])
+; CHECK-NEXT:    [[T7:%.*]] = shl i32 [[T6]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = zext i32 %t0 to i64
+  %t2 = shl i64 -1, %t1 ; shifting by nbits-1
+  %t3 = xor i64 %t2, -1
+  %t4 = sub i32 32, %nbits
+
+  call void @use32(i32 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use64(i64 %t3)
+  call void @use32(i32 %t4)
+
+  %t5 = and i64 %t3, %x
+  call void @use64(i64 %t5)
+  %t6 = trunc i64 %t5 to i32
+  call void @use32(i32 %t6)
+  %t7 = shl i32 %t6, %t4
+  ret i32 %t7
+}
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll
new file mode 100644
index 0000000..dcc789b
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll
@@ -0,0 +1,219 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a
+; single shift+mask.
+
+; There are many variants of this pattern:
+;   c)  (trunc ((x & (-1 >> maskNbits)))) << shiftNbits
+; which simplifies to:
+;   ((trunc(x)) << shiftNbits) & (-1 >> ((-(maskNbits+shiftNbits))+32))
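+
+; For example (constants purely illustrative, not tested below): take
+; i64 %x = 0x123456789A, maskNbits = 40, shiftNbits = 7. The mask
+; -1 u>> 40 keeps the low 24 bits, so
+;   original:   trunc(%x & 0xFFFFFF) << 7 = 0x0056789A << 7 = 0x2B3C4D00
+; and masking after the shift instead, keeping the low (64-40)+7 = 31 bits:
+;   simplified: (trunc(%x) << 7) & 0x7FFFFFFF = 0x2B3C4D00 & 0x7FFFFFFF
+;                                             = 0x2B3C4D00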
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = lshr i64 -1, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = and i64 %t1, %x
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+
+  %t3 = and <8 x i64> %t1, %x
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+
+  %t3 = and <8 x i64> %t1, %x
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+
+  %t3 = and <8 x i64> %t1, %x
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t5
+}
+
+; Extra uses. Negative tests (n-prefixed): the fold is not expected to fire
+; when intermediate values have extra uses, so the IR should stay unchanged.
+
+define i32 @n4_extrause0(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause0(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = lshr i64 -1, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = and i64 %t1, %x
+  call void @use64(i64 %t3)
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
+
+define i32 @n5_extrause1(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_extrause1(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = lshr i64 -1, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = and i64 %t1, %x
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
+
+define i32 @n6_extrause2(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n6_extrause2(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = lshr i64 -1, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = and i64 %t1, %x
+  call void @use64(i64 %t3)
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
new file mode 100644
index 0000000..1f95f92
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll
@@ -0,0 +1,247 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a
+; single shift+mask.
+
+; There are many variants of this pattern:
+;   d)  (trunc ((x & ((-1 << maskNbits) >> maskNbits)))) << shiftNbits
+; which simplifies to:
+;   ((trunc(x)) << shiftNbits) & (-1 >> ((-(maskNbits+shiftNbits))+32))
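+
+; For example (constants purely illustrative, not tested below): take
+; i64 %x = 0x123456789A, maskNbits = 40, shiftNbits = 7. The mask
+; (-1 << 40) u>> 40 = 0x0000000000FFFFFF keeps the low 24 bits, so
+;   original:   trunc(%x & 0xFFFFFF) << 7 = 0x0056789A << 7 = 0x2B3C4D00
+;   simplified: (trunc(%x) << 7) & 0x7FFFFFFF (keep low (64-40)+7 = 31 bits)
+;                                            = 0x2B3C4D00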
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = lshr i64 %t1, %t0
+  %t3 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+
+  %t4 = and i64 %t2, %x
+  %t5 = trunc i64 %t4 to i32
+  %t6 = shl i32 %t5, %t3 ; shift is smaller than mask
+  ret i32 %t6
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
+  %t2 = lshr <8 x i64> %t1, %t0
+  %t3 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+
+  %t4 = and <8 x i64> %t2, %x
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3 ; shift is smaller than mask
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = lshr <8 x i64> %t1, %t0
+  %t3 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+
+  %t4 = and <8 x i64> %t2, %x
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3 ; shift is smaller than mask
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = lshr <8 x i64> %t1, %t0
+  %t3 = add <8 x i32> %nbits, <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+
+  %t4 = and <8 x i64> %t2, %x
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3 ; shift is smaller than mask
+  ret <8 x i32> %t6
+}
+
+; Extra uses. Negative tests (n-prefixed): the fold is not expected to fire
+; when intermediate values have extra uses, so the IR should stay unchanged.
+
+define i32 @n4_extrause0(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause0(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = lshr i64 %t1, %t0
+  %t3 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+
+  %t4 = and i64 %t2, %x
+  call void @use64(i64 %t4)
+  %t5 = trunc i64 %t4 to i32
+  %t6 = shl i32 %t5, %t3 ; shift is smaller than mask
+  ret i32 %t6
+}
+
+define i32 @n5_extrause1(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_extrause1(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = lshr i64 %t1, %t0
+  %t3 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+
+  %t4 = and i64 %t2, %x
+  %t5 = trunc i64 %t4 to i32
+  call void @use32(i32 %t5)
+  %t6 = shl i32 %t5, %t3 ; shift is smaller than mask
+  ret i32 %t6
+}
+
+define i32 @n6_extrause2(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n6_extrause2(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = lshr i64 %t1, %t0
+  %t3 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+
+  %t4 = and i64 %t2, %x
+  call void @use64(i64 %t4)
+  %t5 = trunc i64 %t4 to i32
+  call void @use32(i32 %t5)
+  %t6 = shl i32 %t5, %t3 ; shift is smaller than mask
+  ret i32 %t6
+}
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll
new file mode 100644
index 0000000..a9b3b76
--- /dev/null
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-e.ll
@@ -0,0 +1,219 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a
+; single shift+mask.
+
+; There are many variants of this pattern:
+;   e)  (trunc (((x << maskNbits) l>> maskNbits))) << shiftNbits
+; which simplifies to:
+;   ((trunc(x)) << shiftNbits) & (-1 >> ((-(maskNbits+shiftNbits))+32))
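+
+; For example (constants purely illustrative, not tested below): take
+; i64 %x = 0x123456789A, maskNbits = 40, shiftNbits = 7. Then
+; (%x << 40) u>> 40 keeps the low 24 bits, 0x56789A, so
+;   original:   trunc((%x << 40) u>> 40) << 7 = 0x0056789A << 7 = 0x2B3C4D00
+;   simplified: (trunc(%x) << 7) & 0x7FFFFFFF (keep low (64-40)+7 = 31 bits)
+;                                             = 0x2B3C4D00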
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = lshr i64 %t1, %t0
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+
+  %t3 = lshr <8 x i64> %t1, %t0
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+
+  %t3 = lshr <8 x i64> %t1, %t0
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+
+  %t3 = lshr <8 x i64> %t1, %t0
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
+  ret <8 x i32> %t5
+}
+
+; Extra uses. Negative tests (n-prefixed): the fold is not expected to fire
+; when intermediate values have extra uses, so the IR should stay unchanged.
+
+define i32 @n4_extrause0(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause0(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = lshr i64 %t1, %t0
+  call void @use64(i64 %t3)
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
+
+define i32 @n5_extrause1(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_extrause1(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = lshr i64 %t1, %t0
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
+
+define i32 @n6_extrause2(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n6_extrause2(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = lshr i64 %t1, %t0
+  call void @use64(i64 %t3)
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
+  ret i32 %t5
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll
new file mode 100644
index 0000000..596ae1c
--- /dev/null
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-a.ll
@@ -0,0 +1,199 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; a left-shift of those bits, we can combine the mask and the shift into a
+; single shift+mask.
+
+; There are many variants of this pattern:
+;   a)  (trunc (x & ((1 << maskNbits) - 1))) << shiftNbits
+; which simplifies to:
+;   (trunc(x)) << shiftNbits
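+
+; For example (constants purely illustrative, not tested below): take
+; i64 %x = 0x123456789A, maskNbits = 8, shiftNbits = 24, so that
+; maskNbits + shiftNbits = 32 and the mask is fully redundant:
+;   original:   trunc(%x & 0xFF) << 24 = 0x9A << 24        = 0x9A000000
+;   simplified: trunc(%x) << 24        = 0x3456789A << 24  = 0x9A000000
+; The shift already discards every bit the mask would have cleared.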
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i64 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[NBITS]])
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 1, %t0
+  %t2 = add i64 %t1, -1
+  %t3 = sub i32 32, %nbits
+  %t4 = and i64 %t2, %x
+
+  call void @use32(i32 %nbits)
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[NBITS]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, %t0
+  %t2 = add <8 x i64> %t1, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t3 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi32(<8 x i32> %nbits)
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 undef, i64 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+; CHECK-NEXT:    [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[NBITS]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 undef, i64 1>, %t0
+  %t2 = add <8 x i64> %t1, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+  %t3 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, %nbits
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi32(<8 x i32> %nbits)
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[NBITS]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, %t0
+  %t2 = add <8 x i64> %t1, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t3 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi32(<8 x i32> %nbits)
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+; Extra uses.
+
+define i32 @n4_extrause(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i64 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[NBITS]])
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 1, %t0
+  %t2 = add i64 %t1, -1
+  %t3 = sub i32 32, %nbits
+  %t4 = and i64 %t2, %x
+
+  call void @use32(i32 %nbits)
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  call void @use32(i32 %t5)
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
new file mode 100644
index 0000000..048db9a
--- /dev/null
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
@@ -0,0 +1,244 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; left-shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   b)  (trunc ((x & (~(-1 << maskNbits))))) << shiftNbits
+; simplify to:
+;   ((trunc(x)) << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
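+;
+; A concrete instance (illustrative only, not a checked test): for i64 -> i32
+; with maskNbits = 8 and shiftNbits = 24, maskNbits+shiftNbits equals the
+; narrow bit width (32), so the trailing mask keeps all 32 bits and drops out:
+;   (trunc (x & ~(-1 << 8))) << 24  ==  (trunc x) << 24
+; since the shl by 24 already discards every bit of trunc(x) above bit 7.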
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i64 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[NBITS]])
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = xor i64 %t1, -1
+  %t3 = sub i32 32, %nbits
+  %t4 = and i64 %t2, %x
+
+  call void @use32(i32 %nbits)
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[NBITS]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
+  %t2 = xor <8 x i64> %t1, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t3 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi32(<8 x i32> %nbits)
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+; CHECK-NEXT:    [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[NBITS]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = xor <8 x i64> %t1, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>
+  %t3 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 32>, %nbits
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi32(<8 x i32> %nbits)
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64>
+; CHECK-NEXT:    [[T2:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T1]]
+; CHECK-NEXT:    [[T3:%.*]] = xor <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 33, i32 32, i32 33, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = trunc <8 x i64> [[T5]] to <8 x i32>
+; CHECK-NEXT:    [[T7:%.*]] = shl <8 x i32> [[T6]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T7]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0>
+  %t1 = zext <8 x i32> %t0 to <8 x i64>
+  %t2 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t1 ; shifting by nbits-1
+  %t3 = xor <8 x i64> %t2, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %t4 = sub <8 x i32> <i32 33, i32 32, i32 33, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  %t5 = and <8 x i64> %t3, %x
+
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+  call void @use8xi32(<8 x i32> %t4)
+  call void @use8xi64(<8 x i64> %t5)
+
+  %t6 = trunc <8 x i64> %t5 to <8 x i32>
+  %t7 = shl <8 x i32> %t6, %t4
+  ret <8 x i32> %t7
+}
+
+; The -1 in the xor may itself appear truncated, i.e. as a low-bits all-ones
+; constant (here 4294967295, only the low 32 bits set).
+
+define i32 @t4_allones_trunc(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t4_allones_trunc(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i64 [[T1]], 4294967295
+; CHECK-NEXT:    [[T3:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[NBITS]])
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = xor i64 %t1, 4294967295
+  %t3 = sub i32 32, %nbits
+  %t4 = and i64 %t2, %x
+
+  call void @use32(i32 %nbits)
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
+
+; Extra uses.
+
+define i32 @n5_extrause(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i64 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[NBITS]])
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = xor i64 %t1, -1
+  %t3 = sub i32 32, %nbits
+  %t4 = and i64 %t2, %x
+
+  call void @use32(i32 %nbits)
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  call void @use32(i32 %t5)
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll
new file mode 100644
index 0000000..67849aa
--- /dev/null
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; left-shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   c)  (trunc ((x & (-1 >> maskNbits)))) << shiftNbits
+; simplify to:
+;   (trunc(x)) << shiftNbits
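+;
+; A concrete instance (illustrative only, not a checked test): for i64 -> i32
+; with maskNbits = 40 and shiftNbits = 40 - 32 = 8, the mask keeps only the
+; low 24 bits of x, and
+;   (trunc (x & (-1 >> 40))) << 8  ==  (trunc x) << 8
+; because bits 24 and above of trunc(x) are shifted out of the i32 anyway.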
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = lshr i64 -1, %t0
+  %t2 = add i32 %nbits, -32
+  %t3 = and i64 %t1, %x
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+  call void @use64(i64 %t3)
+
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+  %t3 = and <8 x i64> %t1, %x
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+  %t3 = and <8 x i64> %t1, %x
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+  %t3 = and <8 x i64> %t1, %x
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n4_extrause(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = lshr i64 -1, %t0
+  %t2 = add i32 %nbits, -32
+  %t3 = and i64 %t1, %x
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+  call void @use64(i64 %t3)
+
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
new file mode 100644
index 0000000..eb8ca39
--- /dev/null
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; left-shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   d)  (trunc ((x & ((-1 << maskNbits) >> maskNbits)))) << shiftNbits
+; simplify to:
+;   (trunc(x)) << shiftNbits
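+;
+; A concrete instance (illustrative only, not a checked test): for i64 -> i32
+; with maskNbits = 40 and shiftNbits = 8, ((-1 << 40) >> 40) is the low-24-bits
+; mask, and
+;   (trunc (x & ((-1 << 40) >> 40))) << 8  ==  (trunc x) << 8
+; because bits 24 and above of trunc(x) are shifted out of the i32 anyway.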
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = lshr i64 %t1, %t0
+  %t3 = add i32 %nbits, -32
+  %t4 = and i64 %t2, %x
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
+  %t2 = lshr <8 x i64> %t1, %t0
+  %t3 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = lshr <8 x i64> %t1, %t0
+  %t3 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc <8 x i64> [[T4]] to <8 x i32>
+; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[T5]], [[T3]]
+; CHECK-NEXT:    ret <8 x i32> [[T6]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
+  %t2 = lshr <8 x i64> %t1, %t0
+  %t3 = add <8 x i32> %nbits, <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+  %t4 = and <8 x i64> %t2, %x
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi64(<8 x i64> %t2)
+  call void @use8xi32(<8 x i32> %t3)
+  call void @use8xi64(<8 x i64> %t4)
+
+  %t5 = trunc <8 x i64> %t4 to <8 x i32>
+  %t6 = shl <8 x i32> %t5, %t3
+  ret <8 x i32> %t6
+}
+
+; Extra uses.
+
+define i32 @n4_extrause(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use64(i64 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use64(i64 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T5]])
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
+; CHECK-NEXT:    ret i32 [[T6]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 -1, %t0
+  %t2 = lshr i64 %t1, %t0
+  %t3 = add i32 %nbits, -32
+  %t4 = and i64 %t2, %x
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use64(i64 %t2)
+  call void @use32(i32 %t3)
+  call void @use64(i64 %t4)
+
+  %t5 = trunc i64 %t4 to i32
+  call void @use32(i32 %t5)
+  %t6 = shl i32 %t5, %t3
+  ret i32 %t6
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll
new file mode 100644
index 0000000..8db0e36
--- /dev/null
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-e.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern (here an lshr) that leaves only some low bits set,
+; and then performs left-shift of those bits, we can combine those two shifts
+; into a shift+mask.
+
+; There are many variants to this pattern:
+;   e)  (trunc (((x << maskNbits) l>> maskNbits))) << shiftNbits
+; simplify to:
+;   ((trunc(x)) << shiftNbits) & (-1 >> ((-(maskNbits+shiftNbits))+32))
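+;
+; A concrete instance (illustrative only, not a checked test): for i64 -> i32
+; with maskNbits = 40 and shiftNbits = 40 - 32 = 8, the lshr clears bits 24
+; and above of x, which the final shl pushes out of the 32-bit result anyway:
+;   (trunc ((x << 40) l>> 40)) << 8  ==  (trunc x) << 8
+; so no extra mask is required for this instance.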
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -32
+  %t3 = lshr i64 %t1, %t0
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+  call void @use64(i64 %t3)
+
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+  %t3 = lshr <8 x i64> %t1, %t0
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+  %t3 = lshr <8 x i64> %t1, %t0
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+; CHECK-NEXT:    [[T3:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+  %t3 = lshr <8 x i64> %t1, %t0
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n4_extrause(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -32
+  %t3 = lshr i64 %t1, %t0
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+  call void @use64(i64 %t3)
+
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}
diff --git a/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll
new file mode 100644
index 0000000..e692b18
--- /dev/null
+++ b/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-f.ll
@@ -0,0 +1,208 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern (here an ashr) that leaves only some low bits set,
+; and then performs left-shift of those bits, we can combine those two shifts
+; into a shift+mask.
+
+; There are many variants to this pattern:
+;   f)  (trunc (((x << maskNbits) a>> maskNbits))) << shiftNbits
+; simplify to:
+;   (trunc(x)) << shiftNbits
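+;
+; A concrete instance (illustrative only, not a checked test): for i64 -> i32
+; with maskNbits = 40 and shiftNbits = 40 - 32 = 8, the ashr fills bits 24 and
+; above with copies of bit 23, but the final shl pushes all of those copies
+; out of the 32-bit result:
+;   (trunc ((x << 40) a>> 40)) << 8  ==  (trunc x) << 8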
+
+; Simple tests.
+
+declare void @use32(i32)
+declare void @use64(i64)
+
+define i32 @t0_basic(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T3:%.*]] = ashr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -32
+  %t3 = ashr i64 %t1, %t0
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+  call void @use64(i64 %t3)
+
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+declare void @use8xi64(<8 x i64>)
+
+define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+; CHECK-NEXT:    [[T3:%.*]] = ashr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
+  %t3 = ashr <8 x i64> %t1, %t0
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_splat_undef(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+; CHECK-NEXT:    [[T3:%.*]] = ashr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
+  %t3 = ashr <8 x i64> %t1, %t0
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t3_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+; CHECK-NEXT:    [[T3:%.*]] = ashr <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc <8 x i64> [[T3]] to <8 x i32>
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T4]], [[T2]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = zext <8 x i32> %nbits to <8 x i64>
+  %t1 = shl <8 x i64> %x, %t0
+  %t2 = add <8 x i32> %nbits, <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
+  %t3 = ashr <8 x i64> %t1, %t0
+
+  call void @use8xi64(<8 x i64> %t0)
+  call void @use8xi64(<8 x i64> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi64(<8 x i64> %t3)
+
+  %t4 = trunc <8 x i64> %t3 to <8 x i32>
+  %t5 = shl <8 x i32> %t4, %t2
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n4_extrause(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n4_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -32
+; CHECK-NEXT:    [[T3:%.*]] = ashr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use64(i64 [[T3]])
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -32
+  %t3 = ashr i64 %t1, %t0
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+  call void @use64(i64 %t3)
+
+  %t4 = trunc i64 %t3 to i32
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}
+
+; If a mask would still be needed, we can't fold.
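+; For instance (illustrative): with shiftNbits = nbits - 33, bit 31 of the
+; result comes from a sign copy produced by the ashr rather than from x
+; itself, so it does not match (trunc(x)) << shiftNbits.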
+
+define i32 @n5_mask(i64 %x, i32 %nbits) {
+; CHECK-LABEL: @n5_mask(
+; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = shl i64 [[X:%.*]], [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
+; CHECK-NEXT:    call void @use64(i64 [[T0]])
+; CHECK-NEXT:    call void @use64(i64 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    [[T3:%.*]] = ashr i64 [[T1]], [[T0]]
+; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = zext i32 %nbits to i64
+  %t1 = shl i64 %x, %t0
+  %t2 = add i32 %nbits, -33
+
+  call void @use64(i64 %t0)
+  call void @use64(i64 %t1)
+  call void @use32(i32 %t2)
+
+  %t3 = ashr i64 %t1, %t0
+  %t4 = trunc i64 %t3 to i32
+  %t5 = shl i32 %t4, %t2
+  ret i32 %t5
+}