| /* | 
 |  * z_Windows_NT-586_util.cpp -- platform specific routines. | 
 |  */ | 
 |  | 
 | //===----------------------------------------------------------------------===// | 
 | // | 
 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | 
 | // See https://llvm.org/LICENSE.txt for license information. | 
 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | 
 | // | 
 | //===----------------------------------------------------------------------===// | 
 |  | 
 | #include "kmp.h" | 
 |  | 
 | #if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_ARM) | 
/* The IA-32 architecture provides only a 32-bit "add-exchange" instruction,
   so these routines are implemented as compare_and_store retry loops. */
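
/* Each __kmp_test_then_* routine below follows the same lock-free pattern:
   read the current value, compute the updated value, and retry with
   compare_and_store until no other thread has modified *p in between.
   KMP_CPU_PAUSE() is issued between retries to ease contention; the _REL
   macros are the release-ordered compare-and-store variants.

   Illustrative use (hypothetical names `flags` and `bit`): atomically set
   a flag bit and learn whether it was already set:

     kmp_uint32 prev = __kmp_test_then_or32(&flags, 1u << bit);
     int was_set = (prev >> bit) & 1;
*/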
 |  | 
 | kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) { | 
 |   kmp_int8 old_value, new_value; | 
 |  | 
 |   old_value = TCR_1(*p); | 
 |   new_value = old_value | d; | 
 |  | 
 |   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_1(*p); | 
 |     new_value = old_value | d; | 
 |   } | 
 |   return old_value; | 
 | } | 
 |  | 
 | kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) { | 
 |   kmp_int8 old_value, new_value; | 
 |  | 
 |   old_value = TCR_1(*p); | 
 |   new_value = old_value & d; | 
 |  | 
 |   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_1(*p); | 
 |     new_value = old_value & d; | 
 |   } | 
 |   return old_value; | 
 | } | 
 |  | 
 | kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) { | 
 |   kmp_uint32 old_value, new_value; | 
 |  | 
 |   old_value = TCR_4(*p); | 
 |   new_value = old_value | d; | 
 |  | 
 |   while (!KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)p, old_value, | 
 |                                       new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_4(*p); | 
 |     new_value = old_value | d; | 
 |   } | 
 |   return old_value; | 
 | } | 
 |  | 
 | kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) { | 
 |   kmp_uint32 old_value, new_value; | 
 |  | 
 |   old_value = TCR_4(*p); | 
 |   new_value = old_value & d; | 
 |  | 
 |   while (!KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)p, old_value, | 
 |                                       new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_4(*p); | 
 |     new_value = old_value & d; | 
 |   } | 
 |   return old_value; | 
 | } | 
 |  | 
 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 | 
 | kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) { | 
  kmp_int8 old_value, new_value;
 |  | 
 |   old_value = TCR_1(*p); | 
 |   new_value = old_value + d; | 
 |   while (!__kmp_compare_and_store8(p, old_value, new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_1(*p); | 
 |     new_value = old_value + d; | 
 |   } | 
 |   return old_value; | 
 | } | 
 |  | 
 | #if KMP_ARCH_X86 | 
 | kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) { | 
 |   kmp_int64 old_value, new_value; | 
 |  | 
 |   old_value = TCR_8(*p); | 
 |   new_value = old_value + d; | 
 |   while (!__kmp_compare_and_store64(p, old_value, new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_8(*p); | 
 |     new_value = old_value + d; | 
 |   } | 
 |   return old_value; | 
 | } | 
 | #endif /* KMP_ARCH_X86 */ | 
 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ | 
 |  | 
 | kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) { | 
 |   kmp_uint64 old_value, new_value; | 
 |  | 
 |   old_value = TCR_8(*p); | 
 |   new_value = old_value | d; | 
 |   while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value, | 
 |                                       new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_8(*p); | 
 |     new_value = old_value | d; | 
 |   } | 
 |  | 
 |   return old_value; | 
 | } | 
 |  | 
 | kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) { | 
 |   kmp_uint64 old_value, new_value; | 
 |  | 
 |   old_value = TCR_8(*p); | 
 |   new_value = old_value & d; | 
 |   while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value, | 
 |                                       new_value)) { | 
 |     KMP_CPU_PAUSE(); | 
 |     old_value = TCR_8(*p); | 
 |     new_value = old_value & d; | 
 |   } | 
 |  | 
 |   return old_value; | 
 | } | 
 |  | 
 | #if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC | 
 | // For !KMP_COMPILER_MSVC, this function is provided in assembly form | 
 | // by z_Linux_asm.S. | 
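// Dispatches the compiler-outlined microtask (the body of a parallel
// region), passing the global and local thread ids by address followed by
// the argc user arguments from p_argv, per the microtask_t calling
// convention.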
 | int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc, | 
 |                            void *p_argv[] | 
 | #if OMPT_SUPPORT | 
 |                            , | 
 |                            void **exit_frame_ptr | 
 | #endif | 
 | ) { | 
 | #if OMPT_SUPPORT | 
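  // Record this frame's address so OMPT tools can separate runtime frames
  // from user frames when unwinding.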
 |   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0); | 
 | #endif | 
 |  | 
 |   switch (argc) { | 
 |   case 0: | 
    (*pkfn)(&gtid, &tid);
 |     break; | 
 |   case 1: | 
    (*pkfn)(&gtid, &tid, p_argv[0]);
 |     break; | 
 |   case 2: | 
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
 |     break; | 
 |   case 3: | 
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
 |     break; | 
 |   case 4: | 
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
 |     break; | 
 |   case 5: | 
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
 |     break; | 
 |   default: { | 
 |     // p_argv[6] and onwards must be passed on the stack since 8 registers are | 
 |     // already used. | 
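    // Note: this relies on the alloca'd buffer landing directly above this
    // frame's outgoing-argument area, so the call in case 6 below picks up
    // p_argv[6..argc-1] as its stack arguments.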
 |     size_t len = (argc - 6) * sizeof(void *); | 
 |     void *argbuf = alloca(len); | 
 |     memcpy(argbuf, &p_argv[6], len); | 
 |   } | 
 |     [[fallthrough]]; | 
 |   case 6: | 
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
 |             p_argv[5]); | 
 |     break; | 
 |   } | 
 |  | 
 | #if OMPT_SUPPORT | 
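  // Clear the recorded exit frame so the tool does not observe a stale
  // pointer after the microtask returns.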
 |   *exit_frame_ptr = 0; | 
 | #endif | 
 |  | 
 |   return 1; | 
 | } | 
 | #endif | 
 |  | 
 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_ARM */ |