[llvm] Fix comment typos (NFC)

GitOrigin-RevId: a2d45017182a5b98a6a3c75de49e599d67c92fbb
diff --git a/include/llvm/CodeGen/TargetLowering.h b/include/llvm/CodeGen/TargetLowering.h
index 0d43265..432ac5c 100644
--- a/include/llvm/CodeGen/TargetLowering.h
+++ b/include/llvm/CodeGen/TargetLowering.h
@@ -834,8 +834,8 @@
   virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                  EVT VT) const;
 
-  /// Return the ValueType for comparison libcalls. Comparions libcalls include
-  /// floating point comparion calls, and Ordered/Unordered check calls on
+  /// Return the ValueType for comparison libcalls. Comparison libcalls include
+  /// floating point comparison calls, and Ordered/Unordered check calls on
   /// floating point numbers.
   virtual
   MVT::SimpleValueType getCmpLibcallReturnType() const;
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index f511d62..9cebbbd 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -2660,7 +2660,7 @@
   default:
     return nullptr;
 
-    // Equality comaprisons are easy to fold.
+    // Equality comparisons are easy to fold.
   case CmpInst::ICMP_EQ:
   case CmpInst::ICMP_NE:
     break;
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 379614b..884ea21 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -2780,7 +2780,7 @@
       } else if (auto *GI = dyn_cast<GlobalIFunc>(GV)) {
         Type *ResolverFTy =
             GlobalIFunc::getResolverFunctionType(GI->getValueType());
-        // Transparently fix up the type for compatiblity with older bitcode
+        // Transparently fix up the type for compatibility with older bitcode
         GI->setResolver(
             ConstantExpr::getBitCast(C, ResolverFTy->getPointerTo()));
       } else {
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 3ea1d6c..d243cb5 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -104,7 +104,7 @@
 
   // Map from pair of (StackSlot and Original VNI) to a set of spills which
   // have the same stackslot and have equal values defined by Original VNI.
-  // These spills are mergeable and are hoist candiates.
+  // These spills are mergeable and are hoist candidates.
   using MergeableSpillsMap =
       MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
   MergeableSpillsMap MergeableSpills;
diff --git a/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index db4ab3c..968283e 100644
--- a/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1931,7 +1931,7 @@
         Result.first->second = P;
     }
 
-    // Accumulate any bitmask operands into the clobberred reg mask for this
+    // Accumulate any bitmask operands into the clobbered reg mask for this
     // block.
     for (auto &P : MTracker->Masks) {
       BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords);
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 8a6595f..cbe4ca2 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -397,7 +397,7 @@
     }
   }
 
-  // Use the target specific return value for comparions lib calls.
+  // Use the target specific return value for comparison lib calls.
   EVT RetVT = getCmpLibcallReturnType();
   SDValue Ops[2] = {NewLHS, NewRHS};
   TargetLowering::MakeLibCallOptions CallOptions;
diff --git a/lib/CodeGen/VLIWMachineScheduler.cpp b/lib/CodeGen/VLIWMachineScheduler.cpp
index 8225d4e..8846097 100644
--- a/lib/CodeGen/VLIWMachineScheduler.cpp
+++ b/lib/CodeGen/VLIWMachineScheduler.cpp
@@ -582,7 +582,7 @@
   for (const auto &P : PD) {
     if (!P.isValid())
       continue;
-    // The pressure differences are computed bottom-up, so the comparision for
+    // The pressure differences are computed bottom-up, so the comparison for
     // an increase is positive in the bottom direction, but negative in the
     //  top-down direction.
     if (HighPressureSets[P.getPSet()])
diff --git a/lib/ExecutionEngine/Orc/Core.cpp b/lib/ExecutionEngine/Orc/Core.cpp
index dd80630..8169568 100644
--- a/lib/ExecutionEngine/Orc/Core.cpp
+++ b/lib/ExecutionEngine/Orc/Core.cpp
@@ -2427,7 +2427,7 @@
 
       // Add any non-candidates from the last JITDylib (if any) back on to the
       // list of definition candidates for this JITDylib, reset definition
-      // non-candiates to the empty set.
+      // non-candidates to the empty set.
       SymbolLookupSet Tmp;
       std::swap(IPLS->DefGeneratorNonCandidates, Tmp);
       IPLS->DefGeneratorCandidates.append(std::move(Tmp));
diff --git a/lib/IR/BuiltinGCs.cpp b/lib/IR/BuiltinGCs.cpp
index e9ef034..dc7cb81 100644
--- a/lib/IR/BuiltinGCs.cpp
+++ b/lib/IR/BuiltinGCs.cpp
@@ -20,7 +20,7 @@
 
 namespace {
 
-/// An example GC which attempts to be compatibile with Erlang/OTP garbage
+/// An example GC which attempts to be compatible with Erlang/OTP garbage
 /// collector.
 ///
 /// The frametable emitter is in ErlangGCPrinter.cpp.
diff --git a/lib/MC/MCParser/AsmLexer.cpp b/lib/MC/MCParser/AsmLexer.cpp
index c3bc3bf..d03fb9c 100644
--- a/lib/MC/MCParser/AsmLexer.cpp
+++ b/lib/MC/MCParser/AsmLexer.cpp
@@ -716,7 +716,7 @@
   if (CommentString.size() == 1)
     return CommentString[0] == Ptr[0];
 
-  // Allow # preprocessor commments also be counted as comments for "##" cases
+  // Allow # preprocessor comments to also be counted as comments for "##" cases
   if (CommentString[1] == '#')
     return CommentString[0] == Ptr[0];
 
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 835a7b6..1bcca77 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1777,7 +1777,7 @@
   return true;
 }
 
-/// Remove comparision in csinc-cmp sequence
+/// Remove comparison in csinc-cmp sequence
 ///
 /// Examples:
 /// 1. \code
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 74a985e..05febfd 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9016,7 +9016,7 @@
 
     // Extract the vector elements from Op1 and Op2 one by one and truncate them
     // to be the right size for the destination. For example, if Op1 is v4i1
-    // then the promoted vector is v4i32. The result of concatentation gives a
+    // then the promoted vector is v4i32. The result of concatenation gives a
     // v8i1, which when promoted is v8i16. That means each i32 element from Op1
     // needs truncating to i16 and inserting in the result.
     EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index a68f7f1..b385e0b 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -302,7 +302,7 @@
     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
     Out.emitInstruction(ITInst, getSTI());
 
-    // Emit the conditonal instructions
+    // Emit the conditional instructions
     assert(PendingConditionalInsts.size() <= 4);
     for (const MCInst &Inst : PendingConditionalInsts) {
       Out.emitInstruction(Inst, getSTI());
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index b4979c9..29f6582 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -354,7 +354,7 @@
   return false;
 }
 
-/// Generate a machine instruction node for the new circlar buffer intrinsics.
+/// Generate a machine instruction node for the new circular buffer intrinsics.
 /// The new versions use a CSx register instead of the K field.
 bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
   if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index b8671f2..ee92383 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2127,7 +2127,7 @@
          !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
 }
 
-// Return true if the instruction is a compund branch instruction.
+// Return true if the instruction is a compound branch instruction.
 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
   return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
 }
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index 67d91d2..c9d0645 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -1379,7 +1379,7 @@
     bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr);
 
     // We cannot optimize an unsupported compare opcode or
-    // a mix of 32-bit and 64-bit comaprisons
+    // a mix of 32-bit and 64-bit comparisons
     if (!isSupportedCmpOp(CMPI1->getOpcode()) ||
         !isSupportedCmpOp(CMPI2->getOpcode()) ||
         is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode()))
diff --git a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
index 82c150b..cd6169a 100644
--- a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
+++ b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
@@ -291,7 +291,7 @@
               !BBI->modifiesRegister(Pair.DefReg, TRI))
             continue;
 
-          // The use needs to be used in the address compuation and not
+          // The use needs to be used in the address computation and not
           // as the register being stored for a store.
           const MachineOperand *UseOp =
               hasPCRelativeForm(*BBI) ? &BBI->getOperand(2) : nullptr;
diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp
index fca42d7..ac713d9 100644
--- a/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3259,7 +3259,7 @@
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
     if (Subtarget.hasStdExtZbp()) {
-      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
+      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
       // Start with the maximum immediate value which is the bitwidth - 1.
       unsigned Imm = VT.getSizeInBits() - 1;
       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index ce30d8e..a86e221 100644
--- a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -102,7 +102,7 @@
     if (CByte) {
       // Handle cases that can be done using at most two of
       // MVI, MVHI, MVHHI and MVGHI.  The latter two can only be
-      // used if ByteVal is all zeros or all ones; in other casees,
+      // used if ByteVal is all zeros or all ones; in other cases,
       // we can move at most 2 halfwords.
       uint64_t ByteVal = CByte->getZExtValue();
       if (ByteVal == 0 || ByteVal == 255 ?
diff --git a/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 62f7155..c1cd58d 100644
--- a/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -68,7 +68,7 @@
     case Instruction::Shl:
       // SIMD128's shifts currently only accept a scalar shift count. For each
       // element, we'll need to extract, op, insert. The following is a rough
-      // approxmation.
+      // approximation.
       if (Opd2Info != TTI::OK_UniformValue &&
           Opd2Info != TTI::OK_UniformConstantValue)
         Cost =
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index b8afe7e..cc13b5d 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -5241,7 +5241,7 @@
 
     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
     bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
-    // Multiply is commmutative.
+    // Multiply is commutative.
     if (!foldedLoad) {
       foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
       if (foldedLoad)
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index d1d5e1f..1c08cf3 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -1401,7 +1401,7 @@
     if (MinSize == 2 && Subtarget->is32Bit() &&
         Subtarget->isTargetWindowsMSVC() &&
         (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
-      // For compatibilty reasons, when targetting MSVC, is is important to
+      // For compatibility reasons, when targeting MSVC, it is important to
       // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools
       // rely specifically on this pattern to be able to patch a function.
       // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE.
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index b27aac9..aa99316 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -4247,7 +4247,7 @@
   if (!ST->hasAVX512())
     return Cost + LT.first * (IsLoad ? 2 : 8);
 
-  // AVX-512 masked load/store is cheapper
+  // AVX-512 masked load/store is cheaper
   return Cost + LT.first;
 }
 
diff --git a/lib/Transforms/IPO/AttributorAttributes.cpp b/lib/Transforms/IPO/AttributorAttributes.cpp
index 78d3410..6cf3188 100644
--- a/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -4296,7 +4296,7 @@
         } else if (OffsetSExt > 0) {
           // If something was stripped but there is circular reasoning we look
           // for the offset. If it is positive we basically decrease the
-          // dereferenceable bytes in a circluar loop now, which will simply
+          // dereferenceable bytes in a circular loop now, which will simply
           // drive them down to the known value in a very slow way which we
           // can accelerate.
           T.indicatePessimisticFixpoint();
@@ -5447,7 +5447,7 @@
     return nullptr;
   }
 
-  /// Helper function for querying AAValueSimplify and updating candicate.
+  /// Helper function for querying AAValueSimplify and updating candidate.
   /// \param IRP The value position we are trying to unify with SimplifiedValue
   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                       const IRPosition &IRP, bool Simplify = true) {
@@ -5586,7 +5586,7 @@
       if (!askSimplifiedValueForOtherAAs(A))
         return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
@@ -5625,7 +5625,7 @@
       if (!askSimplifiedValueForOtherAAs(A))
         return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
@@ -5662,7 +5662,7 @@
     if (!askSimplifiedValueForOtherAAs(A))
       return indicatePessimisticFixpoint();
 
-    // If a candicate was found in this update, return CHANGED.
+    // If a candidate was found in this update, return CHANGED.
     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                : ChangeStatus ::CHANGED;
   }
diff --git a/lib/Transforms/Instrumentation/ControlHeightReduction.cpp b/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
index adc007d..abe928f 100644
--- a/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
+++ b/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
@@ -1983,7 +1983,7 @@
     findScopes(AllScopes);
     CHR_DEBUG(dumpScopes(AllScopes, "All scopes"));
 
-    // Split the scopes if 1) the conditiona values of the biased
+    // Split the scopes if 1) the conditional values of the biased
     // branches/selects of the inner/lower scope can't be hoisted up to the
     // outermost/uppermost scope entry, or 2) the condition values of the biased
     // branches/selects in a scope (including subscopes) don't share at least
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 6815688..dfdf9c0 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1464,7 +1464,7 @@
       //   br i1 icmp ne (i8 (i8)* @my_func, i8 (i8)* null), label %use_my_func,
       //   label %avoid_my_func
       // The @"dfsw$my_func" wrapper is never null, so if we replace this use
-      // in the comparision, the icmp will simplify to false and we have
+      // in the comparison, the icmp will simplify to false and we have
-      // accidentially optimized away a null check that is necessary.
+      // accidentally optimized away a null check that is necessary.
       // This can lead to a crash when the null extern_weak my_func is called.
       //
diff --git a/lib/Transforms/Scalar/LoopFlatten.cpp b/lib/Transforms/Scalar/LoopFlatten.cpp
index f36193f..e1eeb1d 100644
--- a/lib/Transforms/Scalar/LoopFlatten.cpp
+++ b/lib/Transforms/Scalar/LoopFlatten.cpp
@@ -540,7 +540,7 @@
       // they make a net difference of zero.
       if (IterationInstructions.count(&I))
         continue;
-      // The uncoditional branch to the inner loop's header will turn into
+      // The unconditional branch to the inner loop's header will turn into
       // a fall-through, so adds no cost.
       BranchInst *Br = dyn_cast<BranchInst>(&I);
       if (Br && Br->isUnconditional() &&
diff --git a/lib/Transforms/Scalar/LoopFuse.cpp b/lib/Transforms/Scalar/LoopFuse.cpp
index da752c9..d873644 100644
--- a/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/lib/Transforms/Scalar/LoopFuse.cpp
@@ -699,7 +699,7 @@
   /// stating whether or not the two candidates are known at compile time to
   /// have the same TripCount. The second is the difference in the two
   /// TripCounts. This information can be used later to determine whether or not
-  /// peeling can be performed on either one of the candiates.
+  /// peeling can be performed on either one of the candidates.
   std::pair<bool, Optional<unsigned>>
   haveIdenticalTripCounts(const FusionCandidate &FC0,
                           const FusionCandidate &FC1) const {
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index a3434f8..0a17b77 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -146,7 +146,7 @@
   "enable-lsr-phielim", cl::Hidden, cl::init(true),
   cl::desc("Enable LSR phi elimination"));
 
-// The flag adds instruction count to solutions cost comparision.
+// The flag adds instruction count to the solution cost comparison.
 static cl::opt<bool> InsnsCost(
   "lsr-insns-cost", cl::Hidden, cl::init(true),
   cl::desc("Add instruction count to a LSR cost model"));
diff --git a/lib/Transforms/Utils/CanonicalizeAliases.cpp b/lib/Transforms/Utils/CanonicalizeAliases.cpp
index 9101a1e..4d62267 100644
--- a/lib/Transforms/Utils/CanonicalizeAliases.cpp
+++ b/lib/Transforms/Utils/CanonicalizeAliases.cpp
@@ -16,7 +16,7 @@
 //  @a = alias i8, i8 *@g  <-- @a is now an alias to base object @g
 //  @b = alias i8, i8 *@g
 //
-// Eventually this file will implement full alias canonicalation, so that
+// Eventually this file will implement full alias canonicalization, so that
 // all aliasees are private anonymous values. E.g.
 //  @a = alias i8, i8 *@g
 //  @g = global i8 0
diff --git a/lib/Transforms/Utils/CodeLayout.cpp b/lib/Transforms/Utils/CodeLayout.cpp
index 1ff0f14..243dd09 100644
--- a/lib/Transforms/Utils/CodeLayout.cpp
+++ b/lib/Transforms/Utils/CodeLayout.cpp
@@ -778,7 +778,7 @@
 
   /// Merge two chains of blocks respecting a given merge 'type' and 'offset'.
   ///
-  /// If MergeType == 0, then the result is a concatentation of two chains.
+  /// If MergeType == 0, then the result is a concatenation of two chains.
   /// Otherwise, the first chain is cut into two sub-chains at the offset,
   /// and merged using all possible ways of concatenating three chains.
   MergedChain mergeBlocks(const std::vector<Block *> &X,
diff --git a/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 372cd74..d0dfbc1 100644
--- a/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1508,7 +1508,7 @@
 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   // In canonical mode we compute the addrec as an expression of a canonical IV
   // using evaluateAtIteration and expand the resulting SCEV expression. This
-  // way we avoid introducing new IVs to carry on the comutation of the addrec
+  // way we avoid introducing new IVs to carry on the computation of the addrec
   // throughout the loop.
   //
   // For nested addrecs evaluateAtIteration might need a canonical IV of a
@@ -2191,7 +2191,7 @@
   }
   case scAddRecExpr: {
-    // In this polynominal, we may have some zero operands, and we shouldn't
+    // In this polynomial, we may have some zero operands, and we shouldn't
-    // really charge for those. So how many non-zero coeffients are there?
+    // really charge for those. So how many non-zero coefficients are there?
     int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
                                     return !Op->isZero();
                                   });
@@ -2200,7 +2200,7 @@
     assert(!(*std::prev(S->operands().end()))->isZero() &&
            "Last operand should not be zero");
 
-    // Ignoring constant term (operand 0), how many of the coeffients are u> 1?
+    // Ignoring constant term (operand 0), how many of the coefficients are u> 1?
     int NumNonZeroDegreeNonOneTerms =
       llvm::count_if(S->operands(), [](const SCEV *Op) {
                       auto *SConst = dyn_cast<SCEVConstant>(Op);
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index bba8315..164a916 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -7016,7 +7016,7 @@
         IRBuilder<> Builder(T);
         if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
           BB->removePredecessor(Predecessor);
-          // Turn uncoditional branches into unreachables and remove the dead
+          // Turn unconditional branches into unreachables and remove the dead
           // destination from conditional branches.
           if (BI->isUnconditional())
             Builder.CreateUnreachable();
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 1fcb4cb..af956aa 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3063,7 +3063,7 @@
   // 1) If we know that we must execute the scalar epilogue, emit an
   //    unconditional branch.
   // 2) Otherwise, we must have a single unique exit block (due to how we
-  //    implement the multiple exit case).  In this case, set up a conditonal
+  //    implement the multiple exit case).  In this case, set up a conditional
   //    branch from the middle block to the loop scalar preheader, and the
   //    exit block.  completeLoopSkeleton will update the condition to use an
   //    iteration check, if required to decide whether to execute the remainder.
diff --git a/tools/llvm-profgen/PerfReader.h b/tools/llvm-profgen/PerfReader.h
index 3ffed99..5accea0 100644
--- a/tools/llvm-profgen/PerfReader.h
+++ b/tools/llvm-profgen/PerfReader.h
@@ -119,7 +119,7 @@
 // only changes the leaf of frame stack. \fn isEqual is a virtual function,
 // which will have perf overhead. In the future, if we redesign a better hash
 // function, then we can just skip this or switch to non-virtual function(like
-// just ignore comparision if hash conflicts probabilities is low)
+// just ignore the comparison if the probability of hash conflicts is low)
 template <class T> class Hashable {
 public:
   std::shared_ptr<T> Data;
diff --git a/tools/verify-uselistorder/verify-uselistorder.cpp b/tools/verify-uselistorder/verify-uselistorder.cpp
index e28c33d..9afe681 100644
--- a/tools/verify-uselistorder/verify-uselistorder.cpp
+++ b/tools/verify-uselistorder/verify-uselistorder.cpp
@@ -398,7 +398,7 @@
     return;
 
   // Generate random numbers between 10 and 99, which will line up nicely in
-  // debug output.  We're not worried about collisons here.
+  // debug output.  We're not worried about collisions here.
   LLVM_DEBUG(dbgs() << "V = "; V->dump());
   std::uniform_int_distribution<short> Dist(10, 99);
   SmallDenseMap<const Use *, short, 16> Order;
diff --git a/utils/TableGen/DecoderEmitter.cpp b/utils/TableGen/DecoderEmitter.cpp
index 8477e06..5b0990a 100644
--- a/utils/TableGen/DecoderEmitter.cpp
+++ b/utils/TableGen/DecoderEmitter.cpp
@@ -2254,7 +2254,7 @@
 // fieldFromInstruction().
 // On Windows we make sure that this function is not inlined when
 // using the VS compiler. It has a bug which causes the function
-// to be optimized out in some circustances. See llvm.org/pr38292
+// to be optimized out in some circumstances. See llvm.org/pr38292
 static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
   OS << "// Helper functions for extracting fields from encoded instructions.\n"
      << "// InsnType must either be integral or an APInt-like object that "
diff --git a/utils/UnicodeData/UnicodeNameMappingGenerator.cpp b/utils/UnicodeData/UnicodeNameMappingGenerator.cpp
index 69ed865..49fbff1 100644
--- a/utils/UnicodeData/UnicodeNameMappingGenerator.cpp
+++ b/utils/UnicodeData/UnicodeNameMappingGenerator.cpp
@@ -247,7 +247,7 @@
       } else {
         // When there is no value (that's most intermediate nodes)
-        // Dispense of the 3 values bytes, and only store
-        // 1 byte to track whether the node has sibling and chidren
-        // + 2 bytes for the index of the first children if necessary.
-        // That index also uses bytes 0-6 of the previous byte.
+        // Dispense with the 3 value bytes, and only store
+        // 1 byte to track whether the node has a sibling and children
+        // + 2 bytes for the index of the first child if necessary.
+        // That index also uses bits 0-6 of the previous byte.
         uint8_t Byte =
diff --git a/utils/git/github-automation.py b/utils/git/github-automation.py
index 61e43c5..f29adca 100755
--- a/utils/git/github-automation.py
+++ b/utils/git/github-automation.py
@@ -313,7 +313,7 @@
     def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         """
-        reate a pull request in `self.branch_repo_name`.  The base branch of the
-        pull request will be choosen based on the the milestone attached to
-        the issue represented by `self.issue_number`  For example if the milestone
+        Create a pull request in `self.branch_repo_name`.  The base branch of the
+        pull request will be chosen based on the milestone attached to
+        the issue represented by `self.issue_number`.  For example, if the milestone
         is Release 13.0.1, then the base branch will be release/13.x. `branch`
         will be used as the compare branch.