[OpenMP] Miscellaneous small code improvements (#95603)

Initializes a few previously uninitialized variables, plugs possible
resource leaks, and removes redundant code.

GitOrigin-RevId: 598970904736f3535939f6a5525022219e4ae517
diff --git a/runtime/src/kmp.h b/runtime/src/kmp.h
index 916c1dc..4f4fb32 100644
--- a/runtime/src/kmp.h
+++ b/runtime/src/kmp.h
@@ -4731,6 +4731,8 @@
       : f(nullptr) {
     open(filename, mode, env_var);
   }
+  kmp_safe_raii_file_t(const kmp_safe_raii_file_t &other) = delete;
+  kmp_safe_raii_file_t &operator=(const kmp_safe_raii_file_t &other) = delete;
   ~kmp_safe_raii_file_t() { close(); }
 
   /// Open filename using mode. This is automatically closed in the destructor.
diff --git a/runtime/src/kmp_affinity.cpp b/runtime/src/kmp_affinity.cpp
index dd48b1e..cf5cad0 100644
--- a/runtime/src/kmp_affinity.cpp
+++ b/runtime/src/kmp_affinity.cpp
@@ -1970,7 +1970,6 @@
             hw_thread.ids[index + 1] = sub_id;
             index--;
           }
-          prev = memory;
         }
         prev = obj;
       }
@@ -4989,7 +4988,7 @@
   int depth = __kmp_topology->get_depth();
 
   // Create the table of masks, indexed by thread Id.
-  unsigned numUnique;
+  unsigned numUnique = 0;
   int numAddrs = __kmp_topology->get_num_hw_threads();
   // If OMP_PLACES=cores:<attribute> specified, then attempt
   // to make OS Id mask table using those attributes
diff --git a/runtime/src/kmp_affinity.h b/runtime/src/kmp_affinity.h
index ed24b6f..9ab2c0c 100644
--- a/runtime/src/kmp_affinity.h
+++ b/runtime/src/kmp_affinity.h
@@ -29,6 +29,8 @@
       mask = hwloc_bitmap_alloc();
       this->zero();
     }
+    Mask(const Mask &other) = delete;
+    Mask &operator=(const Mask &other) = delete;
     ~Mask() { hwloc_bitmap_free(mask); }
     void set(int i) override { hwloc_bitmap_set(mask, i); }
     bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
@@ -1271,7 +1273,7 @@
       leaf. It corresponds to the number of entries in numPerLevel if we exclude
       all but one trailing 1. */
   kmp_uint32 depth;
-  kmp_uint32 base_num_threads;
+  kmp_uint32 base_num_threads = 0;
   enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
   volatile kmp_int8 uninitialized; // 0=initialized, 1=not initialized,
   // 2=initialization in progress
@@ -1281,8 +1283,8 @@
       the parent of a node at level i has. For example, if we have a machine
       with 4 packages, 4 cores/package and 2 HT per core, then numPerLevel =
       {2, 4, 4, 1, 1}. All empty levels are set to 1. */
-  kmp_uint32 *numPerLevel;
-  kmp_uint32 *skipPerLevel;
+  kmp_uint32 *numPerLevel = nullptr;
+  kmp_uint32 *skipPerLevel = nullptr;
 
   void deriveLevels() {
     int hier_depth = __kmp_topology->get_depth();
diff --git a/runtime/src/kmp_barrier.cpp b/runtime/src/kmp_barrier.cpp
index 658cee5..d7ef57c 100644
--- a/runtime/src/kmp_barrier.cpp
+++ b/runtime/src/kmp_barrier.cpp
@@ -444,7 +444,8 @@
       next_go = my_current_iter + distributedBarrier::MAX_ITERS;
       my_go_index = tid / b->threads_per_go;
       if (this_thr->th.th_used_in_team.load() == 3) {
-        KMP_COMPARE_AND_STORE_ACQ32(&(this_thr->th.th_used_in_team), 3, 1);
+        (void)KMP_COMPARE_AND_STORE_ACQ32(&(this_thr->th.th_used_in_team), 3,
+                                          1);
       }
       // Check if go flag is set
       if (b->go[my_go_index].go.load() != next_go) {
diff --git a/runtime/src/kmp_csupport.cpp b/runtime/src/kmp_csupport.cpp
index b33c16f..fdbf9ff 100644
--- a/runtime/src/kmp_csupport.cpp
+++ b/runtime/src/kmp_csupport.cpp
@@ -1589,7 +1589,7 @@
   kmp_dyna_lockseq_t lockseq = __kmp_map_hint_to_lock(hint);
   if (*lk == 0) {
     if (KMP_IS_D_LOCK(lockseq)) {
-      KMP_COMPARE_AND_STORE_ACQ32(
+      (void)KMP_COMPARE_AND_STORE_ACQ32(
           (volatile kmp_int32 *)&((kmp_base_tas_lock_t *)crit)->poll, 0,
           KMP_GET_D_TAG(lockseq));
     } else {
@@ -3486,8 +3486,8 @@
   // Check if it is initialized.
   if (*lk == 0) {
     if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
-      KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0,
-                                  KMP_GET_D_TAG(__kmp_user_lock_seq));
+      (void)KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0,
+                                        KMP_GET_D_TAG(__kmp_user_lock_seq));
     } else {
       __kmp_init_indirect_csptr(crit, loc, global_tid,
                                 KMP_GET_I_TAG(__kmp_user_lock_seq));
diff --git a/runtime/src/kmp_runtime.cpp b/runtime/src/kmp_runtime.cpp
index 5b4391a..06bc493 100644
--- a/runtime/src/kmp_runtime.cpp
+++ b/runtime/src/kmp_runtime.cpp
@@ -1983,8 +1983,8 @@
 
 #if OMPT_SUPPORT
     ompt_data_t ompt_parallel_data = ompt_data_none;
-    ompt_data_t *parent_task_data;
-    ompt_frame_t *ompt_frame;
+    ompt_data_t *parent_task_data = NULL;
+    ompt_frame_t *ompt_frame = NULL;
     void *return_address = NULL;
 
     if (ompt_enabled.enabled) {
@@ -5765,8 +5765,8 @@
     for (f = 1; f < team->t.t_nproc; ++f) {
       KMP_DEBUG_ASSERT(team->t.t_threads[f]);
       if (__kmp_barrier_gather_pattern[bs_forkjoin_barrier] == bp_dist_bar) {
-        KMP_COMPARE_AND_STORE_ACQ32(&(team->t.t_threads[f]->th.th_used_in_team),
-                                    1, 2);
+        (void)KMP_COMPARE_AND_STORE_ACQ32(
+            &(team->t.t_threads[f]->th.th_used_in_team), 1, 2);
       }
       __kmp_free_thread(team->t.t_threads[f]);
     }
@@ -9220,8 +9220,8 @@
   // to wake it up.
   for (int f = 1; f < new_nthreads; ++f) {
     KMP_DEBUG_ASSERT(team->t.t_threads[f]);
-    KMP_COMPARE_AND_STORE_ACQ32(&(team->t.t_threads[f]->th.th_used_in_team), 0,
-                                3);
+    (void)KMP_COMPARE_AND_STORE_ACQ32(
+        &(team->t.t_threads[f]->th.th_used_in_team), 0, 3);
     if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) { // Wake up sleeping threads
       __kmp_resume_32(team->t.t_threads[f]->th.th_info.ds.ds_gtid,
                       (kmp_flag_32<false, false> *)NULL);
diff --git a/runtime/src/kmp_tasking.cpp b/runtime/src/kmp_tasking.cpp
index 03ce0dd..7edaa8e 100644
--- a/runtime/src/kmp_tasking.cpp
+++ b/runtime/src/kmp_tasking.cpp
@@ -5276,7 +5276,7 @@
   switch (sched) {
   case 0: // no schedule clause specified, we can choose the default
     // let's try to schedule (team_size*10) tasks
-    grainsize = thread->th.th_team_nproc * 10;
+    grainsize = thread->th.th_team_nproc * static_cast<kmp_uint64>(10);
     KMP_FALLTHROUGH();
   case 2: // num_tasks provided
     if (grainsize > tc) {
diff --git a/runtime/src/kmp_wait_release.h b/runtime/src/kmp_wait_release.h
index 97db689..9baf280 100644
--- a/runtime/src/kmp_wait_release.h
+++ b/runtime/src/kmp_wait_release.h
@@ -104,7 +104,8 @@
 template <flag_type FlagType> class kmp_flag {
 protected:
   flag_properties t; /**< "Type" of the flag in loc */
-  kmp_info_t *waiting_threads[1]; /**< Threads sleeping on this thread. */
+  /**< Threads sleeping on this thread. */
+  kmp_info_t *waiting_threads[1] = {nullptr};
   kmp_uint32 num_waiting_threads; /**< Num threads sleeping on this thread. */
   std::atomic<bool> *sleepLoc;
 
@@ -140,7 +141,7 @@
 class kmp_flag_native : public kmp_flag<FlagType> {
 protected:
   volatile PtrType *loc;
-  PtrType checker; /**< When flag==checker, it has been released. */
+  PtrType checker = (PtrType)0; /**< When flag==checker, it has been released */
   typedef flag_traits<FlagType> traits_type;
 
 public:
@@ -234,7 +235,7 @@
 class kmp_flag_atomic : public kmp_flag<FlagType> {
 protected:
   std::atomic<PtrType> *loc; /**< Pointer to flag location to wait on */
-  PtrType checker; /**< Flag == checker means it has been released. */
+  PtrType checker = (PtrType)0; /**< Flag==checker means it has been released */
 public:
   typedef flag_traits<FlagType> traits_type;
   typedef PtrType flag_t;
@@ -935,7 +936,8 @@
   kmp_uint32 offset; /**< Portion of flag of interest for an operation. */
   bool flag_switch; /**< Indicates a switch in flag location. */
   enum barrier_type bt; /**< Barrier type. */
-  kmp_info_t *this_thr; /**< Thread to redirect to different flag location. */
+  /**< Thread to redirect to different flag location. */
+  kmp_info_t *this_thr = nullptr;
 #if USE_ITT_BUILD
   void *itt_sync_obj; /**< ITT object to pass to new flag location. */
 #endif
diff --git a/runtime/src/ompt-general.cpp b/runtime/src/ompt-general.cpp
index e07c5ff..923eea2 100644
--- a/runtime/src/ompt-general.cpp
+++ b/runtime/src/ompt-general.cpp
@@ -104,9 +104,11 @@
 
 #if KMP_OS_WINDOWS
 static HMODULE ompt_tool_module = NULL;
+static HMODULE ompt_archer_module = NULL;
 #define OMPT_DLCLOSE(Lib) FreeLibrary(Lib)
 #else
 static void *ompt_tool_module = NULL;
+static void *ompt_archer_module = NULL;
 #define OMPT_DLCLOSE(Lib) dlclose(Lib)
 #endif
 
@@ -374,6 +376,7 @@
               "Tool was started and is using the OMPT interface.\n");
           OMPT_VERBOSE_INIT_PRINT(
               "----- END LOGGING OF TOOL REGISTRATION -----\n");
+          ompt_archer_module = h;
           return ret;
         }
         OMPT_VERBOSE_INIT_CONTINUED_PRINT(
@@ -381,6 +384,7 @@
       } else {
         OMPT_VERBOSE_INIT_CONTINUED_PRINT("Failed: %s\n", dlerror());
       }
+      OMPT_DLCLOSE(h);
     }
   }
 #endif
@@ -521,6 +525,8 @@
     }
   }
 
+  if (ompt_archer_module)
+    OMPT_DLCLOSE(ompt_archer_module);
   if (ompt_tool_module)
     OMPT_DLCLOSE(ompt_tool_module);
   memset(&ompt_enabled, 0, sizeof(ompt_enabled));