[CVP] Remove ashr of -1 or 0

Fixes PR52190. CVP already converts an ashr whose left-hand side is known to be non-negative into lshr; this patch additionally removes the ashr entirely when the left-hand side is known to lie in the half-open range [-1, 1), i.e. to be either -1 or 0, since an arithmetic right shift of those values is a no-op.
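
To see why this is sound: an arithmetic right shift fills the vacated bits
with copies of the sign bit, so the all-ones value (-1) and the all-zeros
value (0) are fixed points for every shift amount. A minimal standalone C++
sketch of this identity (illustration only, not code from the patch; signed
right shift in C++ is only guaranteed to be arithmetic since C++20, while
ashr in LLVM IR is always arithmetic):

  #include <cassert>
  #include <cstdint>

  int main() {
    // -1 is all ones and 0 is all zeros; an arithmetic shift copies the
    // sign bit into the vacated positions, so neither value changes for
    // any in-range shift amount.
    for (unsigned K = 0; K < 32; ++K) {
      assert((int32_t{-1} >> K) == -1);
      assert((int32_t{0} >> K) == 0);
    }
  }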

Differential Revision: https://reviews.llvm.org/D113835
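
For reference, the new check relies on ConstantRange's half-open convention:
[-1, 1) over i32 is exactly the two-element set {-1, 0}, and
ConstantRange::contains tests set inclusion. A hand-written sketch using the
llvm::ConstantRange API (illustration only, not part of the patch):

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // [-1, 1): lower bound -1 (sign-extended to 32 bits), exclusive
    // upper bound 1 -- i.e. the set {-1, 0}.
    ConstantRange NegOneOrZero(APInt(32, (uint64_t)-1, /*isSigned=*/true),
                               APInt(32, 1));

    // An LHS range of [-1, 1) is covered, so the ashr is removed...
    assert(NegOneOrZero.contains(
        ConstantRange(APInt(32, (uint64_t)-1, true), APInt(32, 1))));

    // ...but [-2, 2) is not, which is why test8 below keeps its ashr.
    assert(!NegOneOrZero.contains(
        ConstantRange(APInt(32, (uint64_t)-2, true), APInt(32, 2))));
  }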
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 6d6097c..a3fd970 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -69,7 +69,8 @@
 STATISTIC(NumSDivs,     "Number of sdiv converted to udiv");
 STATISTIC(NumUDivURemsNarrowed,
           "Number of udivs/urems whose width was decreased");
-STATISTIC(NumAShrs,     "Number of ashr converted to lshr");
+STATISTIC(NumAShrsConverted, "Number of ashr converted to lshr");
+STATISTIC(NumAShrsRemoved, "Number of ashr removed");
 STATISTIC(NumSRems,     "Number of srem converted to urem");
 STATISTIC(NumSExt,      "Number of sext converted to zext");
 STATISTIC(NumSICmps,    "Number of signed icmp preds simplified to unsigned");
@@ -937,10 +938,22 @@
   if (SDI->getType()->isVectorTy())
     return false;
 
+  ConstantRange LRange = LVI->getConstantRange(SDI->getOperand(0), SDI);
+  unsigned OrigWidth = SDI->getType()->getIntegerBitWidth();
+  ConstantRange NegOneOrZero =
+      ConstantRange(APInt(OrigWidth, (uint64_t)-1, true), APInt(OrigWidth, 1));
+  if (NegOneOrZero.contains(LRange)) {
+    // ashr of -1 or 0 never changes the value, so drop the whole instruction
+    ++NumAShrsRemoved;
+    SDI->replaceAllUsesWith(SDI->getOperand(0));
+    SDI->eraseFromParent();
+    return true;
+  }
+
   if (!isNonNegative(SDI->getOperand(0), LVI, SDI))
     return false;
 
-  ++NumAShrs;
+  ++NumAShrsConverted;
   auto *BO = BinaryOperator::CreateLShr(SDI->getOperand(0), SDI->getOperand(1),
                                         SDI->getName(), SDI);
   BO->setDebugLoc(SDI->getDebugLoc());
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll b/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll
index 7f80807..b89719e 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll
@@ -103,3 +103,40 @@
 exit:
   ret void
 }
+
+; check that ashr of -1 or 0 is optimized away
+; CHECK-LABEL: @test6
+define i32 @test6(i32 %f, i32 %g) {
+entry:
+  %0 = add i32 %f, 1
+  %1 = icmp ult i32 %0, 2
+  tail call void @llvm.assume(i1 %1)
+; CHECK: ret i32 %f
+  %shr = ashr i32 %f, %g
+  ret i32 %shr
+}
+
+; same as test6, but the -1-or-0 LHS is derived differently: %f is known to be 6 or 7, so %sub = %f - 7 is -1 or 0
+; CHECK-LABEL: @test7
+define i32 @test7(i32 %f, i32 %g) {
+entry:
+  %0 = and i32 %f, -2
+  %1 = icmp eq i32 %0, 6
+  tail call void @llvm.assume(i1 %1)
+  %sub = add nsw i32 %f, -7
+; CHECK: ret i32 %sub
+  %shr = ashr i32 %sub, %g
+  ret i32 %shr
+}
+
+; check that ashr of -2 or 1 is not removed; the non-negative 1 is still converted to lshr
+; CHECK-LABEL: @test8
+define i32 @test8(i32 %f, i32 %g, i1 %s) {
+entry:
+; CHECK: ashr i32 -2, %f
+  %0 = ashr i32 -2, %f
+; CHECK: lshr i32 1, %g
+  %1 = ashr i32 1, %g
+  %2 = select i1 %s, i32 %0, i32 %1
+  ret i32 %2
+}