[lld-macho] Support --thinlto-jobs

The test is loosely based on LLD-ELF's `thinlto.ll`. However, I
found that test questionable because the -save_temps behavior it
checks for is identical regardless of whether we are running in single-
or multi-threaded mode. I tried writing a test based on `--time-trace`
but couldn't get it to run deterministically, so I've opted to just
skip checking that behavior for now.
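
For the curious: the accepted values follow LLVM's get_threadpool_strategy()
from llvm/Support/Threading.h. Below is a minimal sketch (illustrative, not
part of this patch; the function name is made up) of the parsing contract the
new driver check relies on:

  #include "llvm/Support/Threading.h"
  #include <cassert>

  void checkJobCountParsing() {
    // "all" and positive integers parse to a valid strategy...
    assert(llvm::get_threadpool_strategy("all"));
    assert(llvm::get_threadpool_strategy("4"));
    // ...an empty value falls back to the caller-supplied default...
    assert(llvm::get_threadpool_strategy(""));
    // ...and anything else (including 0) is rejected, which is what
    // triggers the new "--thinlto-jobs: invalid job count" error.
    assert(!llvm::get_threadpool_strategy("0"));
    assert(!llvm::get_threadpool_strategy("foo"));
  }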

Reviewed By: #lld-macho, gkm

Differential Revision: https://reviews.llvm.org/D99356

GitOrigin-RevId: 050a7a27ca844a912e46cdfeed7a472847ad0bc9
diff --git a/MachO/Config.h b/MachO/Config.h
index 5c9faa8..693c348 100644
--- a/MachO/Config.h
+++ b/MachO/Config.h
@@ -98,6 +98,7 @@
   llvm::StringRef mapFile;
   llvm::StringRef outputFile;
   llvm::StringRef ltoObjPath;
+  llvm::StringRef thinLTOJobs;
   bool demangle = false;
   llvm::MachO::Target target;
   PlatformInfo platformInfo;
diff --git a/MachO/Driver.cpp b/MachO/Driver.cpp
index 736e456..7f34157 100644
--- a/MachO/Driver.cpp
+++ b/MachO/Driver.cpp
@@ -893,8 +893,12 @@
       error(arg->getSpelling() + ": expected a positive integer, but got '" +
             arg->getValue() + "'");
     parallel::strategy = hardware_concurrency(threads);
-    // FIXME: use this to configure ThinLTO concurrency too
+    config->thinLTOJobs = v;
   }
+  if (auto *arg = args.getLastArg(OPT_thinlto_jobs_eq))
+    config->thinLTOJobs = arg->getValue();
+  if (!get_threadpool_strategy(config->thinLTOJobs))
+    error("--thinlto-jobs: invalid job count: " + config->thinLTOJobs);
 
   config->entry = symtab->addUndefined(args.getLastArgValue(OPT_e, "_main"),
                                        /*file=*/nullptr,
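
Note the precedence the Driver.cpp hunk establishes: --threads= seeds
config->thinLTOJobs first, and --thinlto-jobs= is read afterwards, so it wins
when both flags are passed. A rough sketch of the equivalent logic (the
helper name is invented for illustration):

  #include "llvm/ADT/StringRef.h"

  // Illustrative only: mirrors the driver's flag precedence.
  llvm::StringRef pickThinLTOJobs(llvm::StringRef threadsValue,
                                  llvm::StringRef thinLTOJobsValue) {
    llvm::StringRef jobs = threadsValue;  // --threads=N seeds the value
    if (!thinLTOJobsValue.empty())
      jobs = thinLTOJobsValue;            // --thinlto-jobs=N overrides it
    return jobs;
  }
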
diff --git a/MachO/LTO.cpp b/MachO/LTO.cpp
index 7b55835..70c555f 100644
--- a/MachO/LTO.cpp
+++ b/MachO/LTO.cpp
@@ -43,8 +43,8 @@
 }
 
 BitcodeCompiler::BitcodeCompiler() {
-  lto::ThinBackend backend =
-      lto::createInProcessThinBackend(heavyweight_hardware_concurrency());
+  lto::ThinBackend backend = lto::createInProcessThinBackend(
+      heavyweight_hardware_concurrency(config->thinLTOJobs));
   ltoObj = std::make_unique<lto::LTO>(createConfig(), backend);
 }
 
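On the LTO.cpp side, the StringRef overload of
heavyweight_hardware_concurrency() converts the already-validated string back
into a ThreadPoolStrategy; an empty string degrades to the previous default of
one thread per physical core. A hedged sketch of that contract (the helper
name is invented for illustration):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/Support/Threading.h"

  llvm::ThreadPoolStrategy resolveBackendStrategy(llvm::StringRef jobs) {
    // Empty (neither flag given): default heavyweight strategy, i.e. one
    // thread per physical core. Non-empty: expected to have already passed
    // the get_threadpool_strategy() check in the driver.
    return llvm::heavyweight_hardware_concurrency(jobs);
  }
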
diff --git a/MachO/Options.td b/MachO/Options.td
index 90be967..e4bdc4b 100644
--- a/MachO/Options.td
+++ b/MachO/Options.td
@@ -23,6 +23,9 @@
 def threads_eq : Joined<["--"], "threads=">,
     HelpText<"Number of threads. '1' disables multi-threading. By default all available hardware threads are used">,
     Group<grp_lld>;
+def thinlto_jobs_eq : Joined<["--"], "thinlto-jobs=">,
+    HelpText<"Number of ThinLTO jobs. Default to --threads=">,
+    Group<grp_lld>;
 def reproduce: Separate<["--"], "reproduce">,
     Group<grp_lld>;
 def reproduce_eq: Joined<["--"], "reproduce=">,
diff --git a/test/MachO/thinlto-jobs.ll b/test/MachO/thinlto-jobs.ll
new file mode 100644
index 0000000..c35595f
--- /dev/null
+++ b/test/MachO/thinlto-jobs.ll
@@ -0,0 +1,38 @@
+; REQUIRES: x86
+; RUN: rm -rf %t; split-file %s %t
+
+;; I'm not aware of a deterministic way to verify whether LTO is running in
+;; single- or multi-threaded mode. So this test simply checks that we can parse
+;; the --thinlto-jobs flag correctly, but doesn't verify its effect.
+
+; RUN: opt -module-summary %t/f.s -o %t/f.o
+; RUN: opt -module-summary %t/g.s -o %t/g.o
+
+; RUN: %lld --time-trace --thinlto-jobs=1 -dylib %t/f.o %t/g.o -o %t/out
+; RUN: %lld --time-trace --thinlto-jobs=2 -dylib %t/f.o %t/g.o -o %t/out
+; RUN: %lld --thinlto-jobs=all -dylib %t/f.o %t/g.o -o /dev/null
+
+;; Test with a bad value
+; RUN: not %lld --thinlto-jobs=foo -dylib %t/f.o %t/g.o -o /dev/null 2>&1 | FileCheck %s
+; CHECK: error: --thinlto-jobs: invalid job count: foo
+
+;--- f.s
+target triple = "x86_64-apple-darwin"
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+
+declare void @g(...)
+
+define void @f() {
+entry:
+  call void (...) @g()
+  ret void
+}
+
+;--- g.s
+target triple = "x86_64-apple-darwin"
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @g() {
+entry:
+  ret void
+}