| /* |
| * kmp_lock.cpp -- lock-related functions |
| */ |
| |
| |
| //===----------------------------------------------------------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is dual licensed under the MIT and the University of Illinois Open |
| // Source Licenses. See LICENSE.txt for details. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| |
| #include <stddef.h> |
| #include <atomic> |
| |
| #include "kmp.h" |
| #include "kmp_itt.h" |
| #include "kmp_i18n.h" |
| #include "kmp_lock.h" |
| #include "kmp_io.h" |
| |
| #include "tsan_annotations.h" |
| |
| #if KMP_USE_FUTEX |
| # include <unistd.h> |
| # include <sys/syscall.h> |
| // We should really include <futex.h>, but that causes compatibility problems on different |
| // Linux* OS distributions: some require that you include <pci/types.h>, while others break |
| // when you try to include it. |
| // Since all we need is the two macros below (which are part of the kernel ABI, so can't change), |
| // we just define the constants here and don't include <futex.h>. |
| # ifndef FUTEX_WAIT |
| # define FUTEX_WAIT 0 |
| # endif |
| # ifndef FUTEX_WAKE |
| # define FUTEX_WAKE 1 |
| # endif |
| #endif |
| |
| /* Implement spin locks for internal library use. */ |
| /* The algorithm implemented is Lamport's bakery lock [1974]. */ |
| |
| void |
| __kmp_validate_locks( void ) |
| { |
| int i; |
| kmp_uint32 x, y; |
| |
| /* Check to make sure unsigned arithmetic wraps around properly. */ |
| x = ~((kmp_uint32) 0) - 2; |
| y = x - 2; |
| |
| for (i = 0; i < 8; ++i, ++x, ++y) { |
| kmp_uint32 z = (x - y); |
| KMP_ASSERT( z == 2 ); |
| } |
| |
| KMP_ASSERT( offsetof( kmp_base_queuing_lock, tail_id ) % 8 == 0 ); |
| } |
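| |
| // Illustrative note: the wrap-around behavior checked above is what the ticket |
| // (bakery) lock later in this file relies on when it computes the number of |
| // outstanding waiters as next_ticket - now_serving with plain unsigned |
| // subtraction. For example (a sketch only, not executed here): |
| // kmp_uint32 next = 2, serving = 0xFFFFFFFEu; // next_ticket wrapped past 0 |
| // kmp_uint32 distance = next - serving; // == 4, still correct |
| // so overflow of the 32-bit counters does not corrupt the distance calculation. |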
| |
| |
| /* ------------------------------------------------------------------------ */ |
| /* test and set locks */ |
| |
| // |
| // For the non-nested locks, we can only assume that the first 4 bytes were |
| // allocated, since gcc only allocates 4 bytes for omp_lock_t, and the Intel |
| // compiler only allocates a 4 byte pointer on IA-32 architecture. On |
| // Windows* OS on Intel(R) 64, we can assume that all 8 bytes were allocated. |
| // |
| // gcc reserves >= 8 bytes for nested locks, so we can assume that the |
| // entire 8 bytes were allocated for nested locks on all 64-bit platforms. |
| // |
| |
| static kmp_int32 |
| __kmp_get_tas_lock_owner( kmp_tas_lock_t *lck ) |
| { |
| return KMP_LOCK_STRIP(TCR_4( lck->lk.poll )) - 1; |
| } |
| |
| static inline bool |
| __kmp_is_tas_lock_nestable( kmp_tas_lock_t *lck ) |
| { |
| return lck->lk.depth_locked != -1; |
| } |
| |
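| // Outline of the acquire path below (descriptive comment only): |
| // 1. Fast path: if the lock looks free, try a single compare-and-store from |
| // KMP_LOCK_FREE(tas) to KMP_LOCK_BUSY(gtid+1, tas) and return on success. |
| // 2. Slow path: yield (or spin-then-yield when not oversubscribed), then keep |
| // retrying the compare-and-store under the runtime's spin backoff |
| // (__kmp_spin_backoff) until it succeeds. |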
| __forceinline static int |
| __kmp_acquire_tas_lock_timed_template( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_MB(); |
| |
| #ifdef USE_LOCK_PROFILE |
| kmp_uint32 curr = KMP_LOCK_STRIP( TCR_4( lck->lk.poll ) ); |
| if ( ( curr != 0 ) && ( curr != gtid + 1 ) ) |
| __kmp_printf( "LOCK CONTENTION: %p\n", lck ); |
| /* else __kmp_printf( "." );*/ |
| #endif /* USE_LOCK_PROFILE */ |
| |
| if ( ( lck->lk.poll == KMP_LOCK_FREE(tas) ) |
| && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) { |
| KMP_FSYNC_ACQUIRED(lck); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| kmp_uint32 spins; |
| KMP_FSYNC_PREPARE( lck ); |
| KMP_INIT_YIELD( spins ); |
| if ( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc : |
| __kmp_xproc ) ) { |
| KMP_YIELD( TRUE ); |
| } |
| else { |
| KMP_YIELD_SPIN( spins ); |
| } |
| |
| kmp_backoff_t backoff = __kmp_spin_backoff_params; |
| while ( ( lck->lk.poll != KMP_LOCK_FREE(tas) ) || |
| ( ! KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) ) { |
| |
| __kmp_spin_backoff(&backoff); |
| if ( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc : |
| __kmp_xproc ) ) { |
| KMP_YIELD( TRUE ); |
| } |
| else { |
| KMP_YIELD_SPIN( spins ); |
| } |
| } |
| KMP_FSYNC_ACQUIRED( lck ); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| int |
| __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval = __kmp_acquire_tas_lock_timed_template( lck, gtid ); |
| ANNOTATE_TAS_ACQUIRED(lck); |
| return retval; |
| } |
| |
| static int |
| __kmp_acquire_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_lock"; |
| if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_tas_lock_owner( lck ) == gtid ) ) { |
| KMP_FATAL( LockIsAlreadyOwned, func ); |
| } |
| return __kmp_acquire_tas_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| if ( ( lck->lk.poll == KMP_LOCK_FREE(tas) ) |
| && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas) ) ) { |
| KMP_FSYNC_ACQUIRED( lck ); |
| return TRUE; |
| } |
| return FALSE; |
| } |
| |
| static int |
| __kmp_test_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_lock"; |
| if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| return __kmp_test_tas_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_MB(); /* Flush all pending memory write invalidates. */ |
| |
| KMP_FSYNC_RELEASING(lck); |
| ANNOTATE_TAS_RELEASED(lck); |
| KMP_ST_REL32( &(lck->lk.poll), KMP_LOCK_FREE(tas) ); |
| KMP_MB(); /* Flush all pending memory write invalidates. */ |
| |
| KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc : |
| __kmp_xproc ) ); |
| return KMP_LOCK_RELEASED; |
| } |
| |
| static int |
| __kmp_release_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_tas_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_tas_lock_owner( lck ) >= 0 ) |
| && ( __kmp_get_tas_lock_owner( lck ) != gtid ) ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_tas_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_tas_lock( kmp_tas_lock_t * lck ) |
| { |
| TCW_4( lck->lk.poll, KMP_LOCK_FREE(tas) ); |
| } |
| |
| static void |
| __kmp_init_tas_lock_with_checks( kmp_tas_lock_t * lck ) |
| { |
| __kmp_init_tas_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_tas_lock( kmp_tas_lock_t *lck ) |
| { |
| lck->lk.poll = 0; |
| } |
| |
| static void |
| __kmp_destroy_tas_lock_with_checks( kmp_tas_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_lock"; |
| if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_tas_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_tas_lock( lck ); |
| } |
| |
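| // Minimal usage sketch for the plain TAS lock functions above (illustrative |
| // only; inside the runtime these entry points are normally reached through the |
| // lock dispatch tables rather than called directly): |
| // |
| // kmp_tas_lock_t l; |
| // __kmp_init_tas_lock( &l ); |
| // __kmp_acquire_tas_lock( &l, gtid ); // gtid: caller's global thread id |
| // /* ... critical section ... */ |
| // __kmp_release_tas_lock( &l, gtid ); |
| // __kmp_destroy_tas_lock( &l ); |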
| |
| // |
| // nested test and set locks |
| // |
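| // A nested lock may be re-acquired by the thread that already owns it: each |
| // re-acquisition just increments lk.depth_locked, and the underlying TAS lock |
| // is only released once the depth drops back to zero. |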
| |
| int |
| __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_tas_lock_owner( lck ) == gtid ) { |
| lck->lk.depth_locked += 1; |
| return KMP_LOCK_ACQUIRED_NEXT; |
| } |
| else { |
| __kmp_acquire_tas_lock_timed_template( lck, gtid ); |
| ANNOTATE_TAS_ACQUIRED(lck); |
| lck->lk.depth_locked = 1; |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| } |
| |
| static int |
| __kmp_acquire_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_nest_lock"; |
| if ( ! __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_acquire_nested_tas_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval; |
| |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_tas_lock_owner( lck ) == gtid ) { |
| retval = ++lck->lk.depth_locked; |
| } |
| else if ( !__kmp_test_tas_lock( lck, gtid ) ) { |
| retval = 0; |
| } |
| else { |
| KMP_MB(); |
| retval = lck->lk.depth_locked = 1; |
| } |
| return retval; |
| } |
| |
| static int |
| __kmp_test_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_nest_lock"; |
| if ( ! __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_test_nested_tas_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| KMP_MB(); |
| if ( --(lck->lk.depth_locked) == 0 ) { |
| __kmp_release_tas_lock( lck, gtid ); |
| return KMP_LOCK_RELEASED; |
| } |
| return KMP_LOCK_STILL_HELD; |
| } |
| |
| static int |
| __kmp_release_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_nest_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( ! __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_tas_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_tas_lock_owner( lck ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_nested_tas_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_nested_tas_lock( kmp_tas_lock_t * lck ) |
| { |
| __kmp_init_tas_lock( lck ); |
| lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks |
| } |
| |
| static void |
| __kmp_init_nested_tas_lock_with_checks( kmp_tas_lock_t * lck ) |
| { |
| __kmp_init_nested_tas_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck ) |
| { |
| __kmp_destroy_tas_lock( lck ); |
| lck->lk.depth_locked = 0; |
| } |
| |
| static void |
| __kmp_destroy_nested_tas_lock_with_checks( kmp_tas_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_nest_lock"; |
| if ( ! __kmp_is_tas_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_tas_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_nested_tas_lock( lck ); |
| } |
| |
| |
| #if KMP_USE_FUTEX |
| |
| /* ------------------------------------------------------------------------ */ |
| /* futex locks */ |
| |
| // futex locks are really just test and set locks, with a different method |
| // of handling contention. They take the same amount of space as test and |
| // set locks, and are allocated the same way (i.e. use the area allocated by |
| // the compiler for non-nested locks / allocate nested locks on the heap). |
| |
| static kmp_int32 |
| __kmp_get_futex_lock_owner( kmp_futex_lock_t *lck ) |
| { |
| return KMP_LOCK_STRIP(( TCR_4( lck->lk.poll ) >> 1 )) - 1; |
| } |
| |
| static inline bool |
| __kmp_is_futex_lock_nestable( kmp_futex_lock_t *lck ) |
| { |
| return lck->lk.depth_locked != -1; |
| } |
| |
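| // Note on the poll-word encoding used below: the owner stores (gtid+1) shifted |
| // left by one bit, and bit 0 is set by a waiter to tell the owner that at |
| // least one thread is blocked in futex_wait and must be woken with futex_wake |
| // on release. |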
| __forceinline static int |
| __kmp_acquire_futex_lock_timed_template( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| kmp_int32 gtid_code = ( gtid + 1 ) << 1; |
| |
| KMP_MB(); |
| |
| #ifdef USE_LOCK_PROFILE |
| kmp_uint32 curr = KMP_LOCK_STRIP( TCR_4( lck->lk.poll ) ); |
| if ( ( curr != 0 ) && ( curr != gtid_code ) ) |
| __kmp_printf( "LOCK CONTENTION: %p\n", lck ); |
| /* else __kmp_printf( "." );*/ |
| #endif /* USE_LOCK_PROFILE */ |
| |
| KMP_FSYNC_PREPARE( lck ); |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n", |
| lck, lck->lk.poll, gtid ) ); |
| |
| kmp_int32 poll_val; |
| |
| while ( ( poll_val = KMP_COMPARE_AND_STORE_RET32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex), |
| KMP_LOCK_BUSY(gtid_code, futex) ) ) != KMP_LOCK_FREE(futex) ) { |
| |
| kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n", |
| lck, gtid, poll_val, cond ) ); |
| |
| // |
| // NOTE: if you try to use the following condition for this branch |
| // |
| // if ( poll_val & 1 == 0 ) |
| // |
| // then the block below will always be skipped, regardless of the value of |
| // the LSB of poll_val: == binds more tightly than &, so the expression |
| // parses as poll_val & ( 1 == 0 ), which is always zero. |
| // |
| if ( ! cond ) { |
| // |
| // Try to set the lsb in the poll to indicate to the owner |
| // thread that they need to wake this thread up. |
| // |
| if ( ! KMP_COMPARE_AND_STORE_REL32( & ( lck->lk.poll ), poll_val, poll_val | KMP_LOCK_BUSY(1, futex) ) ) { |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n", |
| lck, lck->lk.poll, gtid ) ); |
| continue; |
| } |
| poll_val |= KMP_LOCK_BUSY(1, futex); |
| |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", |
| lck, lck->lk.poll, gtid ) ); |
| } |
| |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n", |
| lck, gtid, poll_val ) ); |
| |
| kmp_int32 rc; |
| if ( ( rc = syscall( __NR_futex, & ( lck->lk.poll ), FUTEX_WAIT, |
| poll_val, NULL, NULL, 0 ) ) != 0 ) { |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) failed (rc=%d errno=%d)\n", |
| lck, gtid, poll_val, rc, errno ) ); |
| continue; |
| } |
| |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n", |
| lck, gtid, poll_val ) ); |
| // |
| // This thread has now done a successful futex wait call and was |
| // entered on the OS futex queue. We must now perform a futex |
| // wake call when releasing the lock, as we have no idea how many |
| // other threads are in the queue. |
| // |
| gtid_code |= 1; |
| } |
| |
| KMP_FSYNC_ACQUIRED( lck ); |
| KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", |
| lck, lck->lk.poll, gtid ) ); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| int |
| __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval = __kmp_acquire_futex_lock_timed_template( lck, gtid ); |
| ANNOTATE_FUTEX_ACQUIRED(lck); |
| return retval; |
| } |
| |
| static int |
| __kmp_acquire_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_lock"; |
| if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_futex_lock_owner( lck ) == gtid ) ) { |
| KMP_FATAL( LockIsAlreadyOwned, func ); |
| } |
| return __kmp_acquire_futex_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| if ( KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY((gtid+1) << 1, futex) ) ) { |
| KMP_FSYNC_ACQUIRED( lck ); |
| return TRUE; |
| } |
| return FALSE; |
| } |
| |
| static int |
| __kmp_test_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_lock"; |
| if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| return __kmp_test_futex_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_MB(); /* Flush all pending memory write invalidates. */ |
| |
| KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n", |
| lck, lck->lk.poll, gtid ) ); |
| |
| KMP_FSYNC_RELEASING(lck); |
| ANNOTATE_FUTEX_RELEASED(lck); |
| |
| kmp_int32 poll_val = KMP_XCHG_FIXED32( & ( lck->lk.poll ), KMP_LOCK_FREE(futex) ); |
| |
| KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n", |
| lck, gtid, poll_val ) ); |
| |
| if ( KMP_LOCK_STRIP(poll_val) & 1 ) { |
| KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n", |
| lck, gtid ) ); |
| syscall( __NR_futex, & ( lck->lk.poll ), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0 ); |
| } |
| |
| KMP_MB(); /* Flush all pending memory write invalidates. */ |
| |
| KA_TRACE( 1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", |
| lck, lck->lk.poll, gtid ) ); |
| |
| KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc : |
| __kmp_xproc ) ); |
| return KMP_LOCK_RELEASED; |
| } |
| |
| static int |
| __kmp_release_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_futex_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_futex_lock_owner( lck ) >= 0 ) |
| && ( __kmp_get_futex_lock_owner( lck ) != gtid ) ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_futex_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_futex_lock( kmp_futex_lock_t * lck ) |
| { |
| TCW_4( lck->lk.poll, KMP_LOCK_FREE(futex) ); |
| } |
| |
| static void |
| __kmp_init_futex_lock_with_checks( kmp_futex_lock_t * lck ) |
| { |
| __kmp_init_futex_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_futex_lock( kmp_futex_lock_t *lck ) |
| { |
| lck->lk.poll = 0; |
| } |
| |
| static void |
| __kmp_destroy_futex_lock_with_checks( kmp_futex_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_lock"; |
| if ( ( sizeof ( kmp_futex_lock_t ) <= OMP_LOCK_T_SIZE ) |
| && __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_futex_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_futex_lock( lck ); |
| } |
| |
| |
| // |
| // nested futex locks |
| // |
| |
| int |
| __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_futex_lock_owner( lck ) == gtid ) { |
| lck->lk.depth_locked += 1; |
| return KMP_LOCK_ACQUIRED_NEXT; |
| } |
| else { |
| __kmp_acquire_futex_lock_timed_template( lck, gtid ); |
| ANNOTATE_FUTEX_ACQUIRED(lck); |
| lck->lk.depth_locked = 1; |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| } |
| |
| static int |
| __kmp_acquire_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_nest_lock"; |
| if ( ! __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_acquire_nested_futex_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval; |
| |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_futex_lock_owner( lck ) == gtid ) { |
| retval = ++lck->lk.depth_locked; |
| } |
| else if ( !__kmp_test_futex_lock( lck, gtid ) ) { |
| retval = 0; |
| } |
| else { |
| KMP_MB(); |
| retval = lck->lk.depth_locked = 1; |
| } |
| return retval; |
| } |
| |
| static int |
| __kmp_test_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_nest_lock"; |
| if ( ! __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_test_nested_futex_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| KMP_MB(); |
| if ( --(lck->lk.depth_locked) == 0 ) { |
| __kmp_release_futex_lock( lck, gtid ); |
| return KMP_LOCK_RELEASED; |
| } |
| return KMP_LOCK_STILL_HELD; |
| } |
| |
| static int |
| __kmp_release_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_nest_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( ! __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_futex_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_futex_lock_owner( lck ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_nested_futex_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_nested_futex_lock( kmp_futex_lock_t * lck ) |
| { |
| __kmp_init_futex_lock( lck ); |
| lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks |
| } |
| |
| static void |
| __kmp_init_nested_futex_lock_with_checks( kmp_futex_lock_t * lck ) |
| { |
| __kmp_init_nested_futex_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck ) |
| { |
| __kmp_destroy_futex_lock( lck ); |
| lck->lk.depth_locked = 0; |
| } |
| |
| static void |
| __kmp_destroy_nested_futex_lock_with_checks( kmp_futex_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_nest_lock"; |
| if ( ! __kmp_is_futex_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_futex_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_nested_futex_lock( lck ); |
| } |
| |
| #endif // KMP_USE_FUTEX |
| |
| |
| /* ------------------------------------------------------------------------ */ |
| /* ticket (bakery) locks */ |
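| |
| // The ticket (bakery) lock is built from two atomic counters, next_ticket and |
| // now_serving. In outline (a sketch of the code that follows, not a separate |
| // implementation): |
| // |
| // my_ticket = fetch_add( &next_ticket, 1 ); // take a ticket |
| // while ( load_acquire( &now_serving ) != my_ticket ) |
| // ; // wait until it is my turn |
| // |
| // Release simply increments now_serving, handing the lock to the next ticket |
| // holder in FIFO order. |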
| |
| static kmp_int32 |
| __kmp_get_ticket_lock_owner( kmp_ticket_lock_t *lck ) |
| { |
| return std::atomic_load_explicit( &lck->lk.owner_id, std::memory_order_relaxed ) - 1; |
| } |
| |
| static inline bool |
| __kmp_is_ticket_lock_nestable( kmp_ticket_lock_t *lck ) |
| { |
| return std::atomic_load_explicit( &lck->lk.depth_locked, std::memory_order_relaxed ) != -1; |
| } |
| |
| static kmp_uint32 |
| __kmp_bakery_check( void *now_serving, kmp_uint32 my_ticket ) |
| { |
| return std::atomic_load_explicit( (std::atomic<unsigned> *)now_serving, std::memory_order_acquire ) == my_ticket; |
| } |
| |
| __forceinline static int |
| __kmp_acquire_ticket_lock_timed_template( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| kmp_uint32 my_ticket = std::atomic_fetch_add_explicit( &lck->lk.next_ticket, 1U, std::memory_order_relaxed ); |
| |
| #ifdef USE_LOCK_PROFILE |
| if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) != my_ticket ) |
| __kmp_printf( "LOCK CONTENTION: %p\n", lck ); |
| /* else __kmp_printf( "." );*/ |
| #endif /* USE_LOCK_PROFILE */ |
| |
| if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_acquire ) == my_ticket ) { |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| KMP_WAIT_YIELD_PTR( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck ); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| int |
| __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval = __kmp_acquire_ticket_lock_timed_template( lck, gtid ); |
| ANNOTATE_TICKET_ACQUIRED(lck); |
| return retval; |
| } |
| |
| static int |
| __kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_ticket_lock_owner( lck ) == gtid ) ) { |
| KMP_FATAL( LockIsAlreadyOwned, func ); |
| } |
| |
| __kmp_acquire_ticket_lock( lck, gtid ); |
| |
| std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed ); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
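| // __kmp_test_ticket_lock never blocks: it can only succeed when next_ticket == |
| // now_serving (lock free, no waiters), in which case it claims that ticket |
| // with a single compare-and-exchange on next_ticket. |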
| int |
| __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| kmp_uint32 my_ticket = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed ); |
| |
| if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) == my_ticket ) { |
| kmp_uint32 next_ticket = my_ticket + 1; |
| if ( std::atomic_compare_exchange_strong_explicit( &lck->lk.next_ticket, |
| &my_ticket, next_ticket, std::memory_order_acquire, std::memory_order_acquire )) { |
| return TRUE; |
| } |
| } |
| return FALSE; |
| } |
| |
| static int |
| __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| |
| int retval = __kmp_test_ticket_lock( lck, gtid ); |
| |
| if ( retval ) { |
| std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed ); |
| } |
| return retval; |
| } |
| |
| int |
| __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| kmp_uint32 distance = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed ) - std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ); |
| |
| ANNOTATE_TICKET_RELEASED(lck); |
| std::atomic_fetch_add_explicit( &lck->lk.now_serving, 1U, std::memory_order_release ); |
| |
| KMP_YIELD( distance |
| > (kmp_uint32) (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ); |
| return KMP_LOCK_RELEASED; |
| } |
| |
| static int |
| __kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_ticket_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_ticket_lock_owner( lck ) >= 0 ) |
| && ( __kmp_get_ticket_lock_owner( lck ) != gtid ) ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); |
| return __kmp_release_ticket_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_ticket_lock( kmp_ticket_lock_t * lck ) |
| { |
| lck->lk.location = NULL; |
| lck->lk.self = lck; |
| std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); // no thread owns the lock. |
| std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ); // -1 => not a nested lock. |
| std::atomic_store_explicit( &lck->lk.initialized, true, std::memory_order_release ); |
| } |
| |
| static void |
| __kmp_init_ticket_lock_with_checks( kmp_ticket_lock_t * lck ) |
| { |
| __kmp_init_ticket_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck ) |
| { |
| std::atomic_store_explicit( &lck->lk.initialized, false, std::memory_order_release ); |
| lck->lk.self = NULL; |
| lck->lk.location = NULL; |
| std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ); |
| } |
| |
| static void |
| __kmp_destroy_ticket_lock_with_checks( kmp_ticket_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_ticket_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_ticket_lock( lck ); |
| } |
| |
| |
| // |
| // nested ticket locks |
| // |
| |
| int |
| __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) { |
| std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ); |
| return KMP_LOCK_ACQUIRED_NEXT; |
| } |
| else { |
| __kmp_acquire_ticket_lock_timed_template( lck, gtid ); |
| ANNOTATE_TICKET_ACQUIRED(lck); |
| std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed ); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| } |
| |
| static int |
| __kmp_acquire_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_nest_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_acquire_nested_ticket_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval; |
| |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) { |
| retval = std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ) + 1; |
| } |
| else if ( !__kmp_test_ticket_lock( lck, gtid ) ) { |
| retval = 0; |
| } |
| else { |
| std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ); |
| std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed ); |
| retval = 1; |
| } |
| return retval; |
| } |
| |
| static int |
| __kmp_test_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, |
| kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_nest_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_test_nested_ticket_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( ( std::atomic_fetch_add_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ) - 1 ) == 0 ) { |
| std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); |
| __kmp_release_ticket_lock( lck, gtid ); |
| return KMP_LOCK_RELEASED; |
| } |
| return KMP_LOCK_STILL_HELD; |
| } |
| |
| static int |
| __kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_nest_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_ticket_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_ticket_lock_owner( lck ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_nested_ticket_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_nested_ticket_lock( kmp_ticket_lock_t * lck ) |
| { |
| __kmp_init_ticket_lock( lck ); |
| std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed ); // >= 0 for nestable locks, -1 for simple locks |
| } |
| |
| static void |
| __kmp_init_nested_ticket_lock_with_checks( kmp_ticket_lock_t * lck ) |
| { |
| __kmp_init_nested_ticket_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck ) |
| { |
| __kmp_destroy_ticket_lock( lck ); |
| std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed ); |
| } |
| |
| static void |
| __kmp_destroy_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_nest_lock"; |
| |
| if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( lck->lk.self != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_ticket_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_ticket_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_nested_ticket_lock( lck ); |
| } |
| |
| |
| // |
| // access functions to fields which don't exist for all lock kinds. |
| // |
| |
| static int |
| __kmp_is_ticket_lock_initialized( kmp_ticket_lock_t *lck ) |
| { |
| return std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) && ( lck->lk.self == lck); |
| } |
| |
| static const ident_t * |
| __kmp_get_ticket_lock_location( kmp_ticket_lock_t *lck ) |
| { |
| return lck->lk.location; |
| } |
| |
| static void |
| __kmp_set_ticket_lock_location( kmp_ticket_lock_t *lck, const ident_t *loc ) |
| { |
| lck->lk.location = loc; |
| } |
| |
| static kmp_lock_flags_t |
| __kmp_get_ticket_lock_flags( kmp_ticket_lock_t *lck ) |
| { |
| return lck->lk.flags; |
| } |
| |
| static void |
| __kmp_set_ticket_lock_flags( kmp_ticket_lock_t *lck, kmp_lock_flags_t flags ) |
| { |
| lck->lk.flags = flags; |
| } |
| |
| /* ------------------------------------------------------------------------ */ |
| /* queuing locks */ |
| |
| /* |
| * First the states |
| * (head,tail) = 0, 0 means lock is unheld, nobody on queue |
| * UINT_MAX or -1, 0 means lock is held, nobody on queue |
| * h, h means lock is held or about to transition, 1 element on queue |
| * h, t h <> t, means lock is held or about to transition, >1 elements on queue |
| * |
| * Now the transitions |
| * Acquire(0,0) = -1 ,0 |
| * Release(0,0) = Error |
| * Acquire(-1,0) = h ,h h > 0 |
| * Release(-1,0) = 0 ,0 |
| * Acquire(h,h) = h ,t h > 0, t > 0, h <> t |
| * Release(h,h) = -1 ,0 h > 0 |
| * Acquire(h,t) = h ,t' h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t' |
| * Release(h,t) = h',t h > 0, t > 0, h <> t, h <> h', h' maybe = t |
| * |
| * And pictorially |
| * |
| * |
| * +-----+ |
| * | 0, 0|------- release -------> Error |
| * +-----+ |
| * | ^ |
| * acquire| |release |
| * | | |
| * | | |
| * v | |
| * +-----+ |
| * |-1, 0| |
| * +-----+ |
| * | ^ |
| * acquire| |release |
| * | | |
| * | | |
| * v | |
| * +-----+ |
| * | h, h| |
| * +-----+ |
| * | ^ |
| * acquire| |release |
| * | | |
| * | | |
| * v | |
| * +-----+ |
| * | h, t|----- acquire, release loopback ---+ |
| * +-----+ | |
| * ^ | |
| * | | |
| * +------------------------------------+ |
| * |
| */ |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| |
| /* Stuff for circular trace buffer */ |
| #define TRACE_BUF_ELE 1024 |
| static char traces[TRACE_BUF_ELE][128] = { 0 } |
| static int tc = 0; |
| #define TRACE_LOCK(X,Y) KMP_SNPRINTF( traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y ); |
| #define TRACE_LOCK_T(X,Y,Z) KMP_SNPRINTF( traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X,Y,Z ); |
| #define TRACE_LOCK_HT(X,Y,Z,Q) KMP_SNPRINTF( traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, Z, Q ); |
| |
| static void |
| __kmp_dump_queuing_lock( kmp_info_t *this_thr, kmp_int32 gtid, |
| kmp_queuing_lock_t *lck, kmp_int32 head_id, kmp_int32 tail_id ) |
| { |
| kmp_int32 t, i; |
| |
| __kmp_printf_no_lock( "\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n" ); |
| |
| i = tc % TRACE_BUF_ELE; |
| __kmp_printf_no_lock( "%s\n", traces[i] ); |
| i = (i+1) % TRACE_BUF_ELE; |
| while ( i != (tc % TRACE_BUF_ELE) ) { |
| __kmp_printf_no_lock( "%s", traces[i] ); |
| i = (i+1) % TRACE_BUF_ELE; |
| } |
| __kmp_printf_no_lock( "\n" ); |
| |
| __kmp_printf_no_lock( |
| "\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, next_wait:%d, head_id:%d, tail_id:%d\n", |
| gtid+1, this_thr->th.th_spin_here, this_thr->th.th_next_waiting, |
| head_id, tail_id ); |
| |
| __kmp_printf_no_lock( "\t\thead: %d ", lck->lk.head_id ); |
| |
| if ( lck->lk.head_id >= 1 ) { |
| t = __kmp_threads[lck->lk.head_id-1]->th.th_next_waiting; |
| while (t > 0) { |
| __kmp_printf_no_lock( "-> %d ", t ); |
| t = __kmp_threads[t-1]->th.th_next_waiting; |
| } |
| } |
| __kmp_printf_no_lock( "; tail: %d ", lck->lk.tail_id ); |
| __kmp_printf_no_lock( "\n\n" ); |
| } |
| |
| #endif /* DEBUG_QUEUING_LOCKS */ |
| |
| static kmp_int32 |
| __kmp_get_queuing_lock_owner( kmp_queuing_lock_t *lck ) |
| { |
| return TCR_4( lck->lk.owner_id ) - 1; |
| } |
| |
| static inline bool |
| __kmp_is_queuing_lock_nestable( kmp_queuing_lock_t *lck ) |
| { |
| return lck->lk.depth_locked != -1; |
| } |
| |
| /* Acquire a lock using the queuing lock implementation. */ |
| template <bool takeTime> |
| /* [TLW] The unused template above is left behind because of what BEB believes is a |
| potential compiler problem with __forceinline. */ |
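| /* Outline of the acquire below (descriptive summary of the code, not a |
| replacement for reading it): the thread first sets its th_spin_here flag, |
| then attempts one of the (head,tail) transitions from the diagram above with |
| a compare-and-store, either grabbing a free lock outright or appending |
| itself to the tail of the queue. If it enqueued itself, it spins on |
| th_spin_here until the releasing thread dequeues it and clears that flag. */ |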
| __forceinline static int |
| __kmp_acquire_queuing_lock_timed_template( kmp_queuing_lock_t *lck, |
| kmp_int32 gtid ) |
| { |
| register kmp_info_t *this_thr = __kmp_thread_from_gtid( gtid ); |
| volatile kmp_int32 *head_id_p = & lck->lk.head_id; |
| volatile kmp_int32 *tail_id_p = & lck->lk.tail_id; |
| volatile kmp_uint32 *spin_here_p; |
| kmp_int32 need_mf = 1; |
| |
| #if OMPT_SUPPORT |
| ompt_state_t prev_state = ompt_state_undefined; |
| #endif |
| |
| KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid )); |
| |
| KMP_FSYNC_PREPARE( lck ); |
| KMP_DEBUG_ASSERT( this_thr != NULL ); |
| spin_here_p = & this_thr->th.th_spin_here; |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "acq ent" ); |
| if ( *spin_here_p ) |
| __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p ); |
| if ( this_thr->th.th_next_waiting != 0 ) |
| __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p ); |
| #endif |
| KMP_DEBUG_ASSERT( !*spin_here_p ); |
| KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 ); |
| |
| |
| /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to head_id_p |
| that may follow, not just in execution order, but also in visibility order. This way, |
| when a releasing thread observes the changes to the queue by this thread, it can |
| rightly assume that spin_here_p has already been set to TRUE, so that when it sets |
| spin_here_p to FALSE, it is not premature. If the releasing thread sets spin_here_p |
| to FALSE before this thread sets it to TRUE, this thread will hang. |
| */ |
| *spin_here_p = TRUE; /* before enqueuing to prevent race */ |
| |
| while( 1 ) { |
| kmp_int32 enqueued; |
| kmp_int32 head; |
| kmp_int32 tail; |
| |
| head = *head_id_p; |
| |
| switch ( head ) { |
| |
| case -1: |
| { |
| #ifdef DEBUG_QUEUING_LOCKS |
| tail = *tail_id_p; |
| TRACE_LOCK_HT( gtid+1, "acq read: ", head, tail ); |
| #endif |
| tail = 0; /* make sure the asynchronously read next link is not used |
| accidentally; this assignment keeps us out of the if ( tail > 0 ) |
| branch in the enqueued case below, which is not necessary for |
| this state transition */ |
| |
| need_mf = 0; |
| /* try (-1,0)->(tid,tid) */ |
| enqueued = KMP_COMPARE_AND_STORE_ACQ64( (volatile kmp_int64 *) tail_id_p, |
| KMP_PACK_64( -1, 0 ), |
| KMP_PACK_64( gtid+1, gtid+1 ) ); |
| #ifdef DEBUG_QUEUING_LOCKS |
| if ( enqueued ) TRACE_LOCK( gtid+1, "acq enq: (-1,0)->(tid,tid)" ); |
| #endif |
| } |
| break; |
| |
| default: |
| { |
| tail = *tail_id_p; |
| KMP_DEBUG_ASSERT( tail != gtid + 1 ); |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK_HT( gtid+1, "acq read: ", head, tail ); |
| #endif |
| |
| if ( tail == 0 ) { |
| enqueued = FALSE; |
| } |
| else { |
| need_mf = 0; |
| /* try (h,t) or (h,h)->(h,tid) */ |
| enqueued = KMP_COMPARE_AND_STORE_ACQ32( tail_id_p, tail, gtid+1 ); |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| if ( enqueued ) TRACE_LOCK( gtid+1, "acq enq: (h,t)->(h,tid)" ); |
| #endif |
| } |
| } |
| break; |
| |
| case 0: /* empty queue */ |
| { |
| kmp_int32 grabbed_lock; |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| tail = *tail_id_p; |
| TRACE_LOCK_HT( gtid+1, "acq read: ", head, tail ); |
| #endif |
| /* try (0,0)->(-1,0) */ |
| |
| /* only legal transition out of head = 0 is head = -1 with no change to tail */ |
| grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32( head_id_p, 0, -1 ); |
| |
| if ( grabbed_lock ) { |
| |
| *spin_here_p = FALSE; |
| |
| KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n", |
| lck, gtid )); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK_HT( gtid+1, "acq exit: ", head, 0 ); |
| #endif |
| |
| #if OMPT_SUPPORT |
| if (ompt_enabled && prev_state != ompt_state_undefined) { |
| /* change the state before clearing wait_id */ |
| this_thr->th.ompt_thread_info.state = prev_state; |
| this_thr->th.ompt_thread_info.wait_id = 0; |
| } |
| #endif |
| |
| KMP_FSYNC_ACQUIRED( lck ); |
| return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */ |
| } |
| enqueued = FALSE; |
| } |
| break; |
| } |
| |
| #if OMPT_SUPPORT |
| if (ompt_enabled && prev_state == ompt_state_undefined) { |
| /* this thread will spin; set wait_id before entering wait state */ |
| prev_state = this_thr->th.ompt_thread_info.state; |
| this_thr->th.ompt_thread_info.wait_id = (uint64_t) lck; |
| this_thr->th.ompt_thread_info.state = ompt_state_wait_lock; |
| } |
| #endif |
| |
| if ( enqueued ) { |
| if ( tail > 0 ) { |
| kmp_info_t *tail_thr = __kmp_thread_from_gtid( tail - 1 ); |
| KMP_ASSERT( tail_thr != NULL ); |
| tail_thr->th.th_next_waiting = gtid+1; |
| /* corresponding wait for this write in release code */ |
| } |
| KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n", lck, gtid )); |
| |
| |
| /* ToDo: May want to consider using __kmp_wait_sleep or something that sleeps for |
| * throughput only here. |
| */ |
| KMP_MB(); |
| KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck); |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "acq spin" ); |
| |
| if ( this_thr->th.th_next_waiting != 0 ) |
| __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p ); |
| #endif |
| KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 ); |
| KA_TRACE( 1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after waiting on queue\n", |
| lck, gtid )); |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "acq exit 2" ); |
| #endif |
| |
| #if OMPT_SUPPORT |
| /* change the state before clearing wait_id */ |
| this_thr->th.ompt_thread_info.state = prev_state; |
| this_thr->th.ompt_thread_info.wait_id = 0; |
| #endif |
| |
| /* got lock, we were dequeued by the thread that released lock */ |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| /* Yield if number of threads > number of logical processors */ |
| /* ToDo: Not sure why this should only be in oversubscription case, |
| maybe should be traditional YIELD_INIT/YIELD_WHEN loop */ |
| KMP_YIELD( TCR_4( __kmp_nth ) > (__kmp_avail_proc ? __kmp_avail_proc : |
| __kmp_xproc ) ); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "acq retry" ); |
| #endif |
| |
| } |
| KMP_ASSERT2( 0, "should not get here" ); |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| int |
| __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| int retval = __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid ); |
| ANNOTATE_QUEUING_ACQUIRED(lck); |
| return retval; |
| } |
| |
| static int |
| __kmp_acquire_queuing_lock_with_checks( kmp_queuing_lock_t *lck, |
| kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) { |
| KMP_FATAL( LockIsAlreadyOwned, func ); |
| } |
| |
| __kmp_acquire_queuing_lock( lck, gtid ); |
| |
| lck->lk.owner_id = gtid + 1; |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| int |
| __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| volatile kmp_int32 *head_id_p = & lck->lk.head_id; |
| kmp_int32 head; |
| #ifdef KMP_DEBUG |
| kmp_info_t *this_thr; |
| #endif |
| |
| KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid )); |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| #ifdef KMP_DEBUG |
| this_thr = __kmp_thread_from_gtid( gtid ); |
| KMP_DEBUG_ASSERT( this_thr != NULL ); |
| KMP_DEBUG_ASSERT( !this_thr->th.th_spin_here ); |
| #endif |
| |
| head = *head_id_p; |
| |
| if ( head == 0 ) { /* nobody on queue, nobody holding */ |
| |
| /* try (0,0)->(-1,0) */ |
| |
| if ( KMP_COMPARE_AND_STORE_ACQ32( head_id_p, 0, -1 ) ) { |
| KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid )); |
| KMP_FSYNC_ACQUIRED(lck); |
| ANNOTATE_QUEUING_ACQUIRED(lck); |
| return TRUE; |
| } |
| } |
| |
| KA_TRACE( 1000, ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid )); |
| return FALSE; |
| } |
| |
| static int |
| __kmp_test_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| |
| int retval = __kmp_test_queuing_lock( lck, gtid ); |
| |
| if ( retval ) { |
| lck->lk.owner_id = gtid + 1; |
| } |
| return retval; |
| } |
| |
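| /* Outline of the release below (descriptive summary): if the queue is empty, |
| a compare-and-store takes (head,tail) from (-1,0) back to (0,0). Otherwise |
| the releasing thread dequeues the head waiter (waiting, if necessary, for |
| that waiter's th_next_waiting link to be published by the enqueuing thread) |
| and then clears the waiter's th_spin_here flag, which is what the waiter is |
| spinning on in the acquire path above. */ |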
| int |
| __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| register kmp_info_t *this_thr; |
| volatile kmp_int32 *head_id_p = & lck->lk.head_id; |
| volatile kmp_int32 *tail_id_p = & lck->lk.tail_id; |
| |
| KA_TRACE( 1000, ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid )); |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| this_thr = __kmp_thread_from_gtid( gtid ); |
| KMP_DEBUG_ASSERT( this_thr != NULL ); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "rel ent" ); |
| |
| if ( this_thr->th.th_spin_here ) |
| __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p ); |
| if ( this_thr->th.th_next_waiting != 0 ) |
| __kmp_dump_queuing_lock( this_thr, gtid, lck, *head_id_p, *tail_id_p ); |
| #endif |
| KMP_DEBUG_ASSERT( !this_thr->th.th_spin_here ); |
| KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 ); |
| |
| KMP_FSYNC_RELEASING(lck); |
| ANNOTATE_QUEUING_RELEASED(lck); |
| |
| while( 1 ) { |
| kmp_int32 dequeued; |
| kmp_int32 head; |
| kmp_int32 tail; |
| |
| head = *head_id_p; |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| tail = *tail_id_p; |
| TRACE_LOCK_HT( gtid+1, "rel read: ", head, tail ); |
| if ( head == 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail ); |
| #endif |
| KMP_DEBUG_ASSERT( head != 0 ); /* holding the lock, head must be -1 or queue head */ |
| |
| if ( head == -1 ) { /* nobody on queue */ |
| |
| /* try (-1,0)->(0,0) */ |
| if ( KMP_COMPARE_AND_STORE_REL32( head_id_p, -1, 0 ) ) { |
| KA_TRACE( 1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n", |
| lck, gtid )); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK_HT( gtid+1, "rel exit: ", 0, 0 ); |
| #endif |
| |
| #if OMPT_SUPPORT |
| /* nothing to do - no other thread is trying to shift blame */ |
| #endif |
| |
| return KMP_LOCK_RELEASED; |
| } |
| dequeued = FALSE; |
| |
| } |
| else { |
| |
| tail = *tail_id_p; |
| if ( head == tail ) { /* only one thread on the queue */ |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| if ( head <= 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail ); |
| #endif |
| KMP_DEBUG_ASSERT( head > 0 ); |
| |
| /* try (h,h)->(-1,0) */ |
| dequeued = KMP_COMPARE_AND_STORE_REL64( (kmp_int64 *) tail_id_p, |
| KMP_PACK_64( head, head ), KMP_PACK_64( -1, 0 ) ); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "rel deq: (h,h)->(-1,0)" ); |
| #endif |
| |
| } |
| else { |
| volatile kmp_int32 *waiting_id_p; |
| kmp_info_t *head_thr = __kmp_thread_from_gtid( head - 1 ); |
| KMP_DEBUG_ASSERT( head_thr != NULL ); |
| waiting_id_p = & head_thr->th.th_next_waiting; |
| |
| /* Does this require synchronous reads? */ |
| #ifdef DEBUG_QUEUING_LOCKS |
| if ( head <= 0 || tail <= 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail ); |
| #endif |
| KMP_DEBUG_ASSERT( head > 0 && tail > 0 ); |
| |
| /* try (h,t)->(h',t) or (t,t) */ |
| |
| KMP_MB(); |
| /* make sure enqueuing thread has time to update next waiting thread field */ |
| *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32*)waiting_id_p, 0, KMP_NEQ, NULL); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "rel deq: (h,t)->(h',t)" ); |
| #endif |
| dequeued = TRUE; |
| } |
| } |
| |
| if ( dequeued ) { |
| kmp_info_t *head_thr = __kmp_thread_from_gtid( head - 1 ); |
| KMP_DEBUG_ASSERT( head_thr != NULL ); |
| |
| /* Does this require synchronous reads? */ |
| #ifdef DEBUG_QUEUING_LOCKS |
| if ( head <= 0 || tail <= 0 ) __kmp_dump_queuing_lock( this_thr, gtid, lck, head, tail ); |
| #endif |
| KMP_DEBUG_ASSERT( head > 0 && tail > 0 ); |
| |
| /* For clean code only. |
| * The waiter is not released until the next statement, which prevents a |
| * race with the acquire code. |
| */ |
| head_thr->th.th_next_waiting = 0; |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK_T( gtid+1, "rel nw=0 for t=", head ); |
| #endif |
| |
| KMP_MB(); |
| /* reset spin value */ |
| head_thr->th.th_spin_here = FALSE; |
| |
| KA_TRACE( 1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after dequeuing\n", |
| lck, gtid )); |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "rel exit 2" ); |
| #endif |
| return KMP_LOCK_RELEASED; |
| } |
| /* KMP_CPU_PAUSE( ); don't want to make releasing thread hold up acquiring threads */ |
| |
| #ifdef DEBUG_QUEUING_LOCKS |
| TRACE_LOCK( gtid+1, "rel retry" ); |
| #endif |
| |
| } /* while */ |
| KMP_ASSERT2( 0, "should not get here" ); |
| return KMP_LOCK_RELEASED; |
| } |
| |
| static int |
| __kmp_release_queuing_lock_with_checks( kmp_queuing_lock_t *lck, |
| kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| lck->lk.owner_id = 0; |
| return __kmp_release_queuing_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_queuing_lock( kmp_queuing_lock_t *lck ) |
| { |
| lck->lk.location = NULL; |
| lck->lk.head_id = 0; |
| lck->lk.tail_id = 0; |
| lck->lk.next_ticket = 0; |
| lck->lk.now_serving = 0; |
| lck->lk.owner_id = 0; // no thread owns the lock. |
| lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks. |
| lck->lk.initialized = lck; |
| |
| KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck)); |
| } |
| |
| static void |
| __kmp_init_queuing_lock_with_checks( kmp_queuing_lock_t * lck ) |
| { |
| __kmp_init_queuing_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck ) |
| { |
| lck->lk.initialized = NULL; |
| lck->lk.location = NULL; |
| lck->lk.head_id = 0; |
| lck->lk.tail_id = 0; |
| lck->lk.next_ticket = 0; |
| lck->lk.now_serving = 0; |
| lck->lk.owner_id = 0; |
| lck->lk.depth_locked = -1; |
| } |
| |
| static void |
| __kmp_destroy_queuing_lock_with_checks( kmp_queuing_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_queuing_lock( lck ); |
| } |
| |
| |
| // |
| // nested queuing locks |
| // |
| |
| int |
| __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) { |
| lck->lk.depth_locked += 1; |
| return KMP_LOCK_ACQUIRED_NEXT; |
| } |
| else { |
| __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid ); |
| ANNOTATE_QUEUING_ACQUIRED(lck); |
| KMP_MB(); |
| lck->lk.depth_locked = 1; |
| KMP_MB(); |
| lck->lk.owner_id = gtid + 1; |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| } |
| |
| static int |
| __kmp_acquire_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_nest_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_acquire_nested_queuing_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval; |
| |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) { |
| retval = ++lck->lk.depth_locked; |
| } |
| else if ( !__kmp_test_queuing_lock( lck, gtid ) ) { |
| retval = 0; |
| } |
| else { |
| KMP_MB(); |
| retval = lck->lk.depth_locked = 1; |
| KMP_MB(); |
| lck->lk.owner_id = gtid + 1; |
| } |
| return retval; |
| } |
| |
| static int |
| __kmp_test_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, |
| kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_nest_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_test_nested_queuing_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| KMP_MB(); |
| if ( --(lck->lk.depth_locked) == 0 ) { |
| KMP_MB(); |
| lck->lk.owner_id = 0; |
| __kmp_release_queuing_lock( lck, gtid ); |
| return KMP_LOCK_RELEASED; |
| } |
| return KMP_LOCK_STILL_HELD; |
| } |
| |
| static int |
| __kmp_release_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_nest_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_nested_queuing_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_nested_queuing_lock( kmp_queuing_lock_t * lck ) |
| { |
| __kmp_init_queuing_lock( lck ); |
| lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks |
| } |
| |
| static void |
| __kmp_init_nested_queuing_lock_with_checks( kmp_queuing_lock_t * lck ) |
| { |
| __kmp_init_nested_queuing_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck ) |
| { |
| __kmp_destroy_queuing_lock( lck ); |
| lck->lk.depth_locked = 0; |
| } |
| |
| static void |
| __kmp_destroy_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_nest_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_queuing_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_nested_queuing_lock( lck ); |
| } |
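| |
| // Illustrative lifecycle sketch (hypothetical caller, not part of the |
| // runtime) for the nested queuing lock routines above; gtid is assumed to |
| // be a valid global thread id. KMP_LOCK_STILL_HELD is returned until the |
| // outermost release: |
| // |
| //     kmp_queuing_lock_t lck; |
| //     __kmp_init_nested_queuing_lock( &lck ); |
| //     __kmp_acquire_nested_queuing_lock( &lck, gtid ); // KMP_LOCK_ACQUIRED_FIRST |
| //     __kmp_acquire_nested_queuing_lock( &lck, gtid ); // KMP_LOCK_ACQUIRED_NEXT |
| //     __kmp_release_nested_queuing_lock( &lck, gtid ); // KMP_LOCK_STILL_HELD |
| //     __kmp_release_nested_queuing_lock( &lck, gtid ); // KMP_LOCK_RELEASED |
| //     __kmp_destroy_nested_queuing_lock( &lck ); |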
| |
| |
| // |
| // access functions to fields which don't exist for all lock kinds. |
| // |
| |
| static int |
| __kmp_is_queuing_lock_initialized( kmp_queuing_lock_t *lck ) |
| { |
| return lck == lck->lk.initialized; |
| } |
| |
| static const ident_t * |
| __kmp_get_queuing_lock_location( kmp_queuing_lock_t *lck ) |
| { |
| return lck->lk.location; |
| } |
| |
| static void |
| __kmp_set_queuing_lock_location( kmp_queuing_lock_t *lck, const ident_t *loc ) |
| { |
| lck->lk.location = loc; |
| } |
| |
| static kmp_lock_flags_t |
| __kmp_get_queuing_lock_flags( kmp_queuing_lock_t *lck ) |
| { |
| return lck->lk.flags; |
| } |
| |
| static void |
| __kmp_set_queuing_lock_flags( kmp_queuing_lock_t *lck, kmp_lock_flags_t flags ) |
| { |
| lck->lk.flags = flags; |
| } |
| |
| #if KMP_USE_ADAPTIVE_LOCKS |
| |
| /* |
| RTM Adaptive locks |
| */ |
| |
| #if KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300 |
| |
| #include <immintrin.h> |
| #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT) |
| |
| #else |
| |
| // Values from the status register after failed speculation. |
| #define _XBEGIN_STARTED (~0u) |
| #define _XABORT_EXPLICIT (1 << 0) |
| #define _XABORT_RETRY (1 << 1) |
| #define _XABORT_CONFLICT (1 << 2) |
| #define _XABORT_CAPACITY (1 << 3) |
| #define _XABORT_DEBUG (1 << 4) |
| #define _XABORT_NESTED (1 << 5) |
| #define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF)) |
| |
| // Aborts for which it's worth trying again immediately |
| #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT) |
| |
| #define STRINGIZE_INTERNAL(arg) #arg |
| #define STRINGIZE(arg) STRINGIZE_INTERNAL(arg) |
| |
| // Access to RTM instructions |
| |
| /* |
| A version of XBegin which returns -1 when speculation starts successfully, and the value of EAX on an abort. |
| This is the same definition as the compiler intrinsic that will be supported at some point. |
| */ |
| static __inline int _xbegin() |
| { |
| int res = -1; |
| |
| #if KMP_OS_WINDOWS |
| #if KMP_ARCH_X86_64 |
| _asm { |
| _emit 0xC7 |
| _emit 0xF8 |
| _emit 2 |
| _emit 0 |
| _emit 0 |
| _emit 0 |
| jmp L2 |
| mov res, eax |
| L2: |
| } |
| #else /* IA32 */ |
| _asm { |
| _emit 0xC7 |
| _emit 0xF8 |
| _emit 2 |
| _emit 0 |
| _emit 0 |
| _emit 0 |
| jmp L2 |
| mov res, eax |
| L2: |
| } |
| #endif // KMP_ARCH_X86_64 |
| #else |
| /* Note that %eax must be noted as killed (clobbered), because |
| * the XSR is returned in %eax(%rax) on abort. Other register |
| * values are restored, so don't need to be killed. |
| * |
| * We must also mark 'res' as an input and an output, since otherwise |
| * 'res=-1' may be dropped as being dead, whereas we do need the |
| * assignment on the successful (i.e., non-abort) path. |
| */ |
| __asm__ volatile ("1: .byte 0xC7; .byte 0xF8;\n" |
| " .long 1f-1b-6\n" |
| " jmp 2f\n" |
| "1: movl %%eax,%0\n" |
| "2:" |
| :"+r"(res)::"memory","%eax"); |
| #endif // KMP_OS_WINDOWS |
| return res; |
| } |
| |
| /* |
| Transaction end |
| */ |
| static __inline void _xend() |
| { |
| #if KMP_OS_WINDOWS |
| __asm { |
| _emit 0x0f |
| _emit 0x01 |
| _emit 0xd5 |
| } |
| #else |
| __asm__ volatile (".byte 0x0f; .byte 0x01; .byte 0xd5" :::"memory"); |
| #endif |
| } |
| |
| /* |
| This is a macro; the argument must be a single-byte constant which |
| can be evaluated by the inline assembler, since it is emitted as a |
| byte into the assembly code. |
| */ |
| #if KMP_OS_WINDOWS |
| #define _xabort(ARG) \ |
| _asm _emit 0xc6 \ |
| _asm _emit 0xf8 \ |
| _asm _emit ARG |
| #else |
| #define _xabort(ARG) \ |
| __asm__ volatile (".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG) :::"memory"); |
| #endif |
| |
| #endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300 |
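| |
| // Illustrative sketch (not compiled here) of the basic RTM pattern these |
| // primitives, or the equivalent compiler intrinsics, support; |
| // __kmp_test_adaptive_lock_only() below follows the same shape. |
| // 'fallback_lock_is_free' stands for whatever non-transactional check the |
| // caller needs and is hypothetical: |
| // |
| //     kmp_uint32 status = _xbegin(); |
| //     if ( status == _XBEGIN_STARTED ) { |
| //         if ( ! fallback_lock_is_free() ) |
| //             _xabort( 0x01 );  // abort; _xbegin() returns the abort status |
| //         /* ... speculative critical section ... */ |
| //         _xend();              // commit the transaction |
| //     } else if ( status & SOFT_ABORT_MASK ) { |
| //         /* transient abort: worth retrying */ |
| //     } else { |
| //         /* hard abort (e.g. _XABORT_CAPACITY): fall back to a real lock */ |
| //     } |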
| |
| // |
| // Statistics are collected for testing purposes. |
| // |
| #if KMP_DEBUG_ADAPTIVE_LOCKS |
| |
| // We accumulate speculative lock statistics when the lock is destroyed. |
| // We keep locks that haven't been destroyed in the liveLocks list |
| // so that we can grab their statistics too. |
| static kmp_adaptive_lock_statistics_t destroyedStats; |
| |
| // To hold the list of live locks. |
| static kmp_adaptive_lock_info_t liveLocks; |
| |
| // A lock so we can safely update the list of locks. |
| static kmp_bootstrap_lock_t chain_lock; |
| |
| // Initialize the list of stats. |
| void |
| __kmp_init_speculative_stats() |
| { |
| kmp_adaptive_lock_info_t *lck = &liveLocks; |
| |
| memset( ( void * ) & ( lck->stats ), 0, sizeof( lck->stats ) ); |
| lck->stats.next = lck; |
| lck->stats.prev = lck; |
| |
| KMP_ASSERT( lck->stats.next->stats.prev == lck ); |
| KMP_ASSERT( lck->stats.prev->stats.next == lck ); |
| |
| __kmp_init_bootstrap_lock( &chain_lock ); |
| |
| } |
| |
| // Insert the lock into the circular list |
| static void |
| __kmp_remember_lock( kmp_adaptive_lock_info_t * lck ) |
| { |
| __kmp_acquire_bootstrap_lock( &chain_lock ); |
| |
| lck->stats.next = liveLocks.stats.next; |
| lck->stats.prev = &liveLocks; |
| |
| liveLocks.stats.next = lck; |
| lck->stats.next->stats.prev = lck; |
| |
| KMP_ASSERT( lck->stats.next->stats.prev == lck ); |
| KMP_ASSERT( lck->stats.prev->stats.next == lck ); |
| |
| __kmp_release_bootstrap_lock( &chain_lock ); |
| } |
| |
| static void |
| __kmp_forget_lock( kmp_adaptive_lock_info_t * lck ) |
| { |
| KMP_ASSERT( lck->stats.next->stats.prev == lck ); |
| KMP_ASSERT( lck->stats.prev->stats.next == lck ); |
| |
| kmp_adaptive_lock_info_t * n = lck->stats.next; |
| kmp_adaptive_lock_info_t * p = lck->stats.prev; |
| |
| n->stats.prev = p; |
| p->stats.next = n; |
| } |
| |
| static void |
| __kmp_zero_speculative_stats( kmp_adaptive_lock_info_t * lck ) |
| { |
| memset( ( void * )&lck->stats, 0, sizeof( lck->stats ) ); |
| __kmp_remember_lock( lck ); |
| } |
| |
| static void |
| __kmp_add_stats( kmp_adaptive_lock_statistics_t * t, kmp_adaptive_lock_info_t * lck ) |
| { |
| kmp_adaptive_lock_statistics_t volatile *s = &lck->stats; |
| |
| t->nonSpeculativeAcquireAttempts += lck->acquire_attempts; |
| t->successfulSpeculations += s->successfulSpeculations; |
| t->hardFailedSpeculations += s->hardFailedSpeculations; |
| t->softFailedSpeculations += s->softFailedSpeculations; |
| t->nonSpeculativeAcquires += s->nonSpeculativeAcquires; |
| t->lemmingYields += s->lemmingYields; |
| } |
| |
| static void |
| __kmp_accumulate_speculative_stats( kmp_adaptive_lock_info_t * lck) |
| { |
| kmp_adaptive_lock_statistics_t *t = &destroyedStats; |
| |
| __kmp_acquire_bootstrap_lock( &chain_lock ); |
| |
| __kmp_add_stats( t, lck ); |
| __kmp_forget_lock( lck ); |
| |
| __kmp_release_bootstrap_lock( &chain_lock ); |
| } |
| |
| static float |
| percent (kmp_uint32 count, kmp_uint32 total) |
| { |
| return (total == 0) ? 0.0: (100.0 * count)/total; |
| } |
| |
| static FILE * |
| __kmp_open_stats_file() |
| { |
| if (strcmp (__kmp_speculative_statsfile, "-") == 0) |
| return stdout; |
| |
| size_t buffLen = KMP_STRLEN( __kmp_speculative_statsfile ) + 20; |
| char buffer[buffLen]; |
| KMP_SNPRINTF (&buffer[0], buffLen, __kmp_speculative_statsfile, |
| (kmp_int32)getpid()); |
| FILE * result = fopen(&buffer[0], "w"); |
| |
| // Maybe we should issue a warning here... |
| return result ? result : stdout; |
| } |
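| |
| // For example (illustrative; in this runtime the file name comes from the |
| // KMP_SPECULATIVE_STATSFILE setting): "-" selects stdout, and because the |
| // name itself is used as the KMP_SNPRINTF format string above, a %d |
| // conversion in it is replaced by the pid: |
| // |
| //     "adaptive-stats-%d.txt"  ->  "adaptive-stats-12345.txt" for pid 12345 |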
| |
| void |
| __kmp_print_speculative_stats() |
| { |
| if (__kmp_user_lock_kind != lk_adaptive) |
| return; |
| |
| FILE * statsFile = __kmp_open_stats_file(); |
| |
| kmp_adaptive_lock_statistics_t total = destroyedStats; |
| kmp_adaptive_lock_info_t *lck; |
| |
| for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) { |
| __kmp_add_stats( &total, lck ); |
| } |
| kmp_adaptive_lock_statistics_t *t = &total; |
| kmp_uint32 totalSections = t->nonSpeculativeAcquires + t->successfulSpeculations; |
| kmp_uint32 totalSpeculations = t->successfulSpeculations + t->hardFailedSpeculations + |
| t->softFailedSpeculations; |
| |
| fprintf ( statsFile, "Speculative lock statistics (all approximate!)\n"); |
| fprintf ( statsFile, " Lock parameters: \n" |
| " max_soft_retries : %10d\n" |
| " max_badness : %10d\n", |
| __kmp_adaptive_backoff_params.max_soft_retries, |
| __kmp_adaptive_backoff_params.max_badness); |
| fprintf( statsFile, " Non-speculative acquire attempts : %10d\n", t->nonSpeculativeAcquireAttempts ); |
| fprintf( statsFile, " Total critical sections : %10d\n", totalSections ); |
| fprintf( statsFile, " Successful speculations : %10d (%5.1f%%)\n", |
| t->successfulSpeculations, percent( t->successfulSpeculations, totalSections ) ); |
| fprintf( statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n", |
| t->nonSpeculativeAcquires, percent( t->nonSpeculativeAcquires, totalSections ) ); |
| fprintf( statsFile, " Lemming yields : %10d\n\n", t->lemmingYields ); |
| |
| fprintf( statsFile, " Speculative acquire attempts : %10d\n", totalSpeculations ); |
| fprintf( statsFile, " Successes : %10d (%5.1f%%)\n", |
| t->successfulSpeculations, percent( t->successfulSpeculations, totalSpeculations ) ); |
| fprintf( statsFile, " Soft failures : %10d (%5.1f%%)\n", |
| t->softFailedSpeculations, percent( t->softFailedSpeculations, totalSpeculations ) ); |
| fprintf( statsFile, " Hard failures : %10d (%5.1f%%)\n", |
| t->hardFailedSpeculations, percent( t->hardFailedSpeculations, totalSpeculations ) ); |
| |
| if (statsFile != stdout) |
| fclose( statsFile ); |
| } |
| |
| # define KMP_INC_STAT(lck,stat) ( lck->lk.adaptive.stats.stat++ ) |
| #else |
| # define KMP_INC_STAT(lck,stat) |
| |
| #endif // KMP_DEBUG_ADAPTIVE_LOCKS |
| |
| static inline bool |
| __kmp_is_unlocked_queuing_lock( kmp_queuing_lock_t *lck ) |
| { |
| // It is enough to check that the head_id is zero. |
| // We don't also need to check the tail. |
| bool res = lck->lk.head_id == 0; |
| |
| // We need a fence here, since we must ensure that no memory operations |
| // from later in this thread float above that read. |
| #if KMP_COMPILER_ICC |
| _mm_mfence(); |
| #else |
| __sync_synchronize(); |
| #endif |
| |
| return res; |
| } |
| |
| // Functions for manipulating the badness |
| static __inline void |
| __kmp_update_badness_after_success( kmp_adaptive_lock_t *lck ) |
| { |
| // Reset the badness to zero so we eagerly try to speculate again |
| lck->lk.adaptive.badness = 0; |
| KMP_INC_STAT(lck,successfulSpeculations); |
| } |
| |
| // Create a bit mask with one more set bit. |
| static __inline void |
| __kmp_step_badness( kmp_adaptive_lock_t *lck ) |
| { |
| kmp_uint32 newBadness = ( lck->lk.adaptive.badness << 1 ) | 1; |
| if ( newBadness > lck->lk.adaptive.max_badness) { |
| return; |
| } else { |
| lck->lk.adaptive.badness = newBadness; |
| } |
| } |
| |
| // Check whether speculation should be attempted. |
| static __inline int |
| __kmp_should_speculate( kmp_adaptive_lock_t *lck, kmp_int32 gtid ) |
| { |
| kmp_uint32 badness = lck->lk.adaptive.badness; |
| kmp_uint32 attempts= lck->lk.adaptive.acquire_attempts; |
| int res = (attempts & badness) == 0; |
| return res; |
| } |
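| |
| // Worked example (illustrative): badness is always a mask of the form |
| // 2^k - 1 (0, 1, 3, 7, ...), capped at max_badness. It grows by one bit each |
| // time a speculative acquire attempt gives up (__kmp_step_badness), and a |
| // successful speculation resets it to 0. So with: |
| // |
| //     badness          = 0b011 (two consecutive give-ups) |
| //     acquire_attempts = 8   ->  8 & 3 == 0  ->  try speculation |
| //     acquire_attempts = 9   ->  9 & 3 != 0  ->  go straight to the real lock |
| // |
| // i.e. speculation is re-tried on every 4th acquire attempt until it |
| // succeeds again. |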
| |
| // Attempt to acquire only the speculative lock. |
| // Does not back off to the non-speculative lock. |
| // |
| static int |
| __kmp_test_adaptive_lock_only( kmp_adaptive_lock_t * lck, kmp_int32 gtid ) |
| { |
| int retries = lck->lk.adaptive.max_soft_retries; |
| |
| // We don't explicitly count the start of speculation, rather we record |
| // the results (success, hard fail, soft fail). The sum of all of those |
| // is the total number of times we started speculation since all |
| // speculations must end one of those ways. |
| do |
| { |
| kmp_uint32 status = _xbegin(); |
| // Switch this in to disable actual speculation but exercise |
| // at least some of the rest of the code. Useful for debugging... |
| // kmp_uint32 status = _XABORT_NESTED; |
| |
| if (status == _XBEGIN_STARTED ) |
| { /* We have successfully started speculation |
| * Check that no-one acquired the lock for real between when we last looked |
| * and now. This also gets the lock cache line into our read-set, |
| * which we need so that we'll abort if anyone later claims it for real. |
| */ |
| if (! __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) ) |
| { |
| // Lock is now visibly acquired, so someone beat us to it. |
| // Abort the transaction so we'll restart from _xbegin with the |
| // failure status. |
| _xabort(0x01); |
| KMP_ASSERT2( 0, "should not get here" ); |
| } |
| return 1; // Lock has been acquired (speculatively) |
| } else { |
| // We have aborted, update the statistics |
| if ( status & SOFT_ABORT_MASK) |
| { |
| KMP_INC_STAT(lck,softFailedSpeculations); |
| // and loop round to retry. |
| } |
| else |
| { |
| KMP_INC_STAT(lck,hardFailedSpeculations); |
| // Give up if we had a hard failure. |
| break; |
| } |
| } |
| } while( retries-- ); // Loop while we have retries, and didn't fail hard. |
| |
| // Either we had a hard failure or we didn't succeed softly after |
| // the full set of attempts, so back off the badness. |
| __kmp_step_badness( lck ); |
| return 0; |
| } |
| |
| // Attempt to acquire the speculative lock, or back off to the non-speculative one |
| // if the speculative lock cannot be acquired. |
| // We can succeed speculatively, non-speculatively, or fail. |
| static int |
| __kmp_test_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid ) |
| { |
| // First try to acquire the lock speculatively |
| if ( __kmp_should_speculate( lck, gtid ) && __kmp_test_adaptive_lock_only( lck, gtid ) ) |
| return 1; |
| |
| // Speculative acquisition failed, so try to acquire it non-speculatively. |
| // Count the non-speculative acquire attempt |
| lck->lk.adaptive.acquire_attempts++; |
| |
| // Use base, non-speculative lock. |
| if ( __kmp_test_queuing_lock( GET_QLK_PTR(lck), gtid ) ) |
| { |
| KMP_INC_STAT(lck,nonSpeculativeAcquires); |
| return 1; // Lock is acquired (non-speculatively) |
| } |
| else |
| { |
| return 0; // Failed to acquire the lock, it's already visibly locked. |
| } |
| } |
| |
| static int |
| __kmp_test_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_lock"; |
| if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| |
| int retval = __kmp_test_adaptive_lock( lck, gtid ); |
| |
| if ( retval ) { |
| lck->lk.qlk.owner_id = gtid + 1; |
| } |
| return retval; |
| } |
| |
| // Block until we can acquire a speculative, adaptive lock. |
| // We check whether we should be trying to speculate. |
| // If we should be, we check the real lock to see if it is free, |
| // and, if not, pause without attempting to acquire it until it is. |
| // Then we try the speculative acquire. |
| // This means that although we suffer from lemmings a little (because |
| // we can't acquire the lock speculatively until the queue of waiting |
| // threads has cleared), we don't get into a state where we can never |
| // acquire the lock speculatively (because we force the queue to clear |
| // by preventing new arrivals from entering the queue). |
| // This does mean that when we're trying to break lemmings, the lock |
| // is no longer fair. However OpenMP makes no guarantee that its |
| // locks are fair, so this isn't a real problem. |
| static void |
| __kmp_acquire_adaptive_lock( kmp_adaptive_lock_t * lck, kmp_int32 gtid ) |
| { |
| if ( __kmp_should_speculate( lck, gtid ) ) |
| { |
| if ( __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) ) |
| { |
| if ( __kmp_test_adaptive_lock_only( lck , gtid ) ) |
| return; |
| // We tried speculation and failed, so give up. |
| } |
| else |
| { |
| // We can't try speculation until the lock is free, so we pause here |
| // (without suspending on the queuing lock) to allow it to drain, then |
| // try again. |
| // All other threads will also see the same result from |
| // __kmp_should_speculate, so they will be doing the same if they try |
| // to claim the lock from now on. |
| while ( ! __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) ) |
| { |
| KMP_INC_STAT(lck,lemmingYields); |
| __kmp_yield (TRUE); |
| } |
| |
| if ( __kmp_test_adaptive_lock_only( lck, gtid ) ) |
| return; |
| } |
| } |
| |
| // Speculative acquisition failed, so acquire it non-speculatively. |
| // Count the non-speculative acquire attempt |
| lck->lk.adaptive.acquire_attempts++; |
| |
| __kmp_acquire_queuing_lock_timed_template<FALSE>( GET_QLK_PTR(lck), gtid ); |
| // We have acquired the base lock, so count that. |
| KMP_INC_STAT(lck,nonSpeculativeAcquires ); |
| ANNOTATE_QUEUING_ACQUIRED(lck); |
| } |
| |
| static void |
| __kmp_acquire_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_lock"; |
| if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) == gtid ) { |
| KMP_FATAL( LockIsAlreadyOwned, func ); |
| } |
| |
| __kmp_acquire_adaptive_lock( lck, gtid ); |
| |
| lck->lk.qlk.owner_id = gtid + 1; |
| } |
| |
| static int |
| __kmp_release_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid ) |
| { |
| if ( __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) ) |
| { // If the lock doesn't look claimed we must be speculating. |
| // (Or the user's code is buggy and they're releasing without locking; |
| // if we had XTEST we'd be able to check that case...) |
| _xend(); // Exit speculation |
| __kmp_update_badness_after_success( lck ); |
| } |
| else |
| { // Since the lock *is* visibly locked we're not speculating, |
| // so should use the underlying lock's release scheme. |
| __kmp_release_queuing_lock( GET_QLK_PTR(lck), gtid ); |
| } |
| return KMP_LOCK_RELEASED; |
| } |
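| |
| // A minimal sketch of the XTEST-based check mentioned above, assuming a |
| // compiler that provides the _xtest() intrinsic from <immintrin.h> (it is |
| // not available to the hand-rolled byte-emitting fallback used here): |
| // |
| //     if ( _xtest() ) {  // non-zero iff we are inside a transaction |
| //         _xend(); |
| //         __kmp_update_badness_after_success( lck ); |
| //     } else { |
| //         __kmp_release_queuing_lock( GET_QLK_PTR(lck), gtid ); |
| //     } |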
| |
| static int |
| __kmp_release_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| lck->lk.qlk.owner_id = 0; |
| __kmp_release_adaptive_lock( lck, gtid ); |
| return KMP_LOCK_RELEASED; |
| } |
| |
| static void |
| __kmp_init_adaptive_lock( kmp_adaptive_lock_t *lck ) |
| { |
| __kmp_init_queuing_lock( GET_QLK_PTR(lck) ); |
| lck->lk.adaptive.badness = 0; |
| lck->lk.adaptive.acquire_attempts = 0; // i.e., nonSpeculativeAcquireAttempts = 0 |
| lck->lk.adaptive.max_soft_retries = __kmp_adaptive_backoff_params.max_soft_retries; |
| lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness; |
| #if KMP_DEBUG_ADAPTIVE_LOCKS |
| __kmp_zero_speculative_stats( &lck->lk.adaptive ); |
| #endif |
| KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck)); |
| } |
| |
| static void |
| __kmp_init_adaptive_lock_with_checks( kmp_adaptive_lock_t * lck ) |
| { |
| __kmp_init_adaptive_lock( lck ); |
| } |
| |
| static void |
| __kmp_destroy_adaptive_lock( kmp_adaptive_lock_t *lck ) |
| { |
| #if KMP_DEBUG_ADAPTIVE_LOCKS |
| __kmp_accumulate_speculative_stats( &lck->lk.adaptive ); |
| #endif |
| __kmp_destroy_queuing_lock (GET_QLK_PTR(lck)); |
| // Nothing needed for the speculative part. |
| } |
| |
| static void |
| __kmp_destroy_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_lock"; |
| if ( lck->lk.qlk.initialized != GET_QLK_PTR(lck) ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_get_queuing_lock_owner( GET_QLK_PTR(lck) ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_adaptive_lock( lck ); |
| } |
| |
| |
| #endif // KMP_USE_ADAPTIVE_LOCKS |
| |
| |
| /* ------------------------------------------------------------------------ */ |
| /* DRDPA ticket locks */ |
| /* "DRDPA" means Dynamically Reconfigurable Distributed Polling Area */ |
| |
| static kmp_int32 |
| __kmp_get_drdpa_lock_owner( kmp_drdpa_lock_t *lck ) |
| { |
| return TCR_4( lck->lk.owner_id ) - 1; |
| } |
| |
| static inline bool |
| __kmp_is_drdpa_lock_nestable( kmp_drdpa_lock_t *lck ) |
| { |
| return lck->lk.depth_locked != -1; |
| } |
| |
| __forceinline static int |
| __kmp_acquire_drdpa_lock_timed_template( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| kmp_uint64 ticket = KMP_TEST_THEN_INC64((kmp_int64 *)&lck->lk.next_ticket); |
| kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load |
| volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls |
| = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| TCR_PTR(lck->lk.polls); // volatile load |
| |
| #ifdef USE_LOCK_PROFILE |
| if (TCR_8(polls[ticket & mask].poll) != ticket) |
| __kmp_printf("LOCK CONTENTION: %p\n", lck); |
| /* else __kmp_printf( "." );*/ |
| #endif /* USE_LOCK_PROFILE */ |
| |
| // |
| // Now spin-wait, but reload the polls pointer and mask, in case the |
| // polling area has been reconfigured. Unless it is reconfigured, the |
| // reloads stay in L1 cache and are cheap. |
| // |
| // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.cpp !!! |
| // |
| // The current implementation of KMP_WAIT_YIELD doesn't allow for mask |
| // and poll to be re-read every spin iteration. |
| // |
| kmp_uint32 spins; |
| |
| KMP_FSYNC_PREPARE(lck); |
| KMP_INIT_YIELD(spins); |
| while (TCR_8(polls[ticket & mask].poll) < ticket) { // volatile load |
| // If we are oversubscribed, |
| // or have waited a bit (and KMP_LIBRARY=turnaround), then yield. |
| // CPU Pause is in the macros for yield. |
| // |
| KMP_YIELD(TCR_4(__kmp_nth) |
| > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); |
| KMP_YIELD_SPIN(spins); |
| |
| // Re-read the mask and the poll pointer from the lock structure. |
| // |
| // Make certain that "mask" is read before "polls" !!! |
| // |
| // If another thread reconfigures the polling area and updates its |
| // values, and we get the new value of mask and the old polls |
| // pointer, we could access memory beyond the end of the old polling |
| // area. |
| // |
| mask = TCR_8(lck->lk.mask); // volatile load |
| polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| TCR_PTR(lck->lk.polls); // volatile load |
| } |
| |
| // |
| // Critical section starts here |
| // |
| KMP_FSYNC_ACQUIRED(lck); |
| KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n", |
| ticket, lck)); |
| lck->lk.now_serving = ticket; // non-volatile store |
| |
| // |
| // Deallocate a garbage polling area if we know that we are the last |
| // thread that could possibly access it. |
| // |
| // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup |
| // ticket. |
| // |
| if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) { |
| __kmp_free((void *)lck->lk.old_polls); |
| lck->lk.old_polls = NULL; |
| lck->lk.cleanup_ticket = 0; |
| } |
| |
| // |
| // Check to see if we should reconfigure the polling area. |
| // If there is still a garbage polling area to be deallocated from a |
| // previous reconfiguration, let a later thread reconfigure it. |
| // |
| if (lck->lk.old_polls == NULL) { |
| bool reconfigure = false; |
| volatile struct kmp_base_drdpa_lock::kmp_lock_poll *old_polls = polls; |
| kmp_uint32 num_polls = TCR_4(lck->lk.num_polls); |
| |
| if (TCR_4(__kmp_nth) |
| > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { |
| // |
| // We are in oversubscription mode. Contract the polling area |
| // down to a single location, if that hasn't been done already. |
| // |
| if (num_polls > 1) { |
| reconfigure = true; |
| num_polls = TCR_4(lck->lk.num_polls); |
| mask = 0; |
| num_polls = 1; |
| polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| __kmp_allocate(num_polls * sizeof(*polls)); |
| polls[0].poll = ticket; |
| } |
| } |
| else { |
| // |
| // We are in under/fully subscribed mode. Check the number of |
| // threads waiting on the lock. The size of the polling area |
| // should be at least the number of threads waiting. |
| // |
| kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1; |
| if (num_waiting > num_polls) { |
| kmp_uint32 old_num_polls = num_polls; |
| reconfigure = true; |
| do { |
| mask = (mask << 1) | 1; |
| num_polls *= 2; |
| } while (num_polls <= num_waiting); |
| |
| // |
| // Allocate the new polling area, and copy the relevant portion |
| // of the old polling area to the new area. __kmp_allocate() |
| // zeroes the memory it allocates, and most of the old area is |
| // just zero padding, so we only copy the release counters. |
| // |
| polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| __kmp_allocate(num_polls * sizeof(*polls)); |
| kmp_uint32 i; |
| for (i = 0; i < old_num_polls; i++) { |
| polls[i].poll = old_polls[i].poll; |
| } |
| } |
| } |
| |
| if (reconfigure) { |
| // |
| // Now write the updated fields back to the lock structure. |
| // |
| // Make certain that "polls" is written before "mask" !!! |
| // |
| // If another thread picks up the new value of mask and the old |
| // polls pointer, it could access memory beyond the end of the |
| // old polling area. |
| // |
| // On x86, we need memory fences. |
| // |
| KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring lock %p to %d polls\n", |
| ticket, lck, num_polls)); |
| |
| lck->lk.old_polls = old_polls; // non-volatile store |
| lck->lk.polls = polls; // volatile store |
| |
| KMP_MB(); |
| |
| lck->lk.num_polls = num_polls; // non-volatile store |
| lck->lk.mask = mask; // volatile store |
| |
| KMP_MB(); |
| |
| // |
| // Only after the new polling area and mask have been flushed |
| // to main memory can we update the cleanup ticket field. |
| // |
| // volatile load / non-volatile store |
| // |
| lck->lk.cleanup_ticket = TCR_8(lck->lk.next_ticket); |
| } |
| } |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
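| |
| // Worked example (illustrative): with num_polls == 4 the mask is 3, so a |
| // thread holding ticket t spins on polls[t & 3], and the releasing thread |
| // stores t into polls[t & 3] (see __kmp_release_drdpa_lock), which is |
| // exactly the word the next owner is watching: |
| // |
| //     ticket 4 -> polls[0],  ticket 5 -> polls[1], |
| //     ticket 6 -> polls[2],  ticket 7 -> polls[3] |
| // |
| // Growing the area doubles num_polls and widens the mask by one bit |
| // (mask = (mask << 1) | 1), e.g. 4 polls / mask 3 -> 8 polls / mask 7. |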
| |
| int |
| __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval = __kmp_acquire_drdpa_lock_timed_template( lck, gtid ); |
| ANNOTATE_DRDPA_ACQUIRED(lck); |
| return retval; |
| } |
| |
| static int |
| __kmp_acquire_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) ) { |
| KMP_FATAL( LockIsAlreadyOwned, func ); |
| } |
| |
| __kmp_acquire_drdpa_lock( lck, gtid ); |
| |
| lck->lk.owner_id = gtid + 1; |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| |
| int |
| __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| // |
| // First get a ticket, then read the polls pointer and the mask. |
| // The polls pointer must be read before the mask!!! (See above) |
| // |
| kmp_uint64 ticket = TCR_8(lck->lk.next_ticket); // volatile load |
| volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls |
| = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| TCR_PTR(lck->lk.polls); // volatile load |
| kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load |
| if (TCR_8(polls[ticket & mask].poll) == ticket) { |
| kmp_uint64 next_ticket = ticket + 1; |
| if (KMP_COMPARE_AND_STORE_ACQ64((kmp_int64 *)&lck->lk.next_ticket, |
| ticket, next_ticket)) { |
| KMP_FSYNC_ACQUIRED(lck); |
| KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n", |
| ticket, lck)); |
| lck->lk.now_serving = ticket; // non-volatile store |
| |
| // |
| // Since no threads are waiting, there is no possibility that |
| // we would want to reconfigure the polling area. We might |
| // have the cleanup ticket value (which says that it is now |
| // safe to deallocate old_polls), but we'll let a later thread |
| // which calls __kmp_acquire_lock do that - this routine |
| // isn't supposed to block, and we would risk blocking if we |
| // called __kmp_free() to do the deallocation. |
| // |
| return TRUE; |
| } |
| } |
| return FALSE; |
| } |
| |
| static int |
| __kmp_test_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| |
| int retval = __kmp_test_drdpa_lock( lck, gtid ); |
| |
| if ( retval ) { |
| lck->lk.owner_id = gtid + 1; |
| } |
| return retval; |
| } |
| |
| int |
| __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| // |
| // Read the ticket value from the lock data struct, then the polls |
| // pointer and the mask. The polls pointer must be read before the |
| // mask!!! (See above) |
| // |
| kmp_uint64 ticket = lck->lk.now_serving + 1; // non-volatile load |
| volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls |
| = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| TCR_PTR(lck->lk.polls); // volatile load |
| kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load |
| KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n", |
| ticket - 1, lck)); |
| KMP_FSYNC_RELEASING(lck); |
| ANNOTATE_DRDPA_RELEASED(lck); |
| KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store |
| return KMP_LOCK_RELEASED; |
| } |
| |
| static int |
| __kmp_release_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_drdpa_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( ( gtid >= 0 ) && ( __kmp_get_drdpa_lock_owner( lck ) >= 0 ) |
| && ( __kmp_get_drdpa_lock_owner( lck ) != gtid ) ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| lck->lk.owner_id = 0; |
| return __kmp_release_drdpa_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck ) |
| { |
| lck->lk.location = NULL; |
| lck->lk.mask = 0; |
| lck->lk.num_polls = 1; |
| lck->lk.polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *) |
| __kmp_allocate(lck->lk.num_polls * sizeof(*(lck->lk.polls))); |
| lck->lk.cleanup_ticket = 0; |
| lck->lk.old_polls = NULL; |
| lck->lk.next_ticket = 0; |
| lck->lk.now_serving = 0; |
| lck->lk.owner_id = 0; // no thread owns the lock. |
| lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks. |
| lck->lk.initialized = lck; |
| |
| KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck)); |
| } |
| |
| static void |
| __kmp_init_drdpa_lock_with_checks( kmp_drdpa_lock_t * lck ) |
| { |
| __kmp_init_drdpa_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck ) |
| { |
| lck->lk.initialized = NULL; |
| lck->lk.location = NULL; |
| if (lck->lk.polls != NULL) { |
| __kmp_free((void *)lck->lk.polls); |
| lck->lk.polls = NULL; |
| } |
| if (lck->lk.old_polls != NULL) { |
| __kmp_free((void *)lck->lk.old_polls); |
| lck->lk.old_polls = NULL; |
| } |
| lck->lk.mask = 0; |
| lck->lk.num_polls = 0; |
| lck->lk.cleanup_ticket = 0; |
| lck->lk.next_ticket = 0; |
| lck->lk.now_serving = 0; |
| lck->lk.owner_id = 0; |
| lck->lk.depth_locked = -1; |
| } |
| |
| static void |
| __kmp_destroy_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockNestableUsedAsSimple, func ); |
| } |
| if ( __kmp_get_drdpa_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_drdpa_lock( lck ); |
| } |
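| |
| // Illustrative lifecycle sketch (hypothetical caller, not part of the |
| // runtime) for the simple DRDPA lock routines above; gtid is assumed to be |
| // a valid global thread id: |
| // |
| //     kmp_drdpa_lock_t lck; |
| //     __kmp_init_drdpa_lock( &lck ); |
| //     __kmp_acquire_drdpa_lock( &lck, gtid );     // blocks until the lock is owned |
| //     __kmp_release_drdpa_lock( &lck, gtid ); |
| //     if ( __kmp_test_drdpa_lock( &lck, gtid ) )  // non-blocking attempt |
| //         __kmp_release_drdpa_lock( &lck, gtid ); |
| //     __kmp_destroy_drdpa_lock( &lck ); |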
| |
| |
| // |
| // nested drdpa ticket locks |
| // |
| |
| int |
| __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) { |
| lck->lk.depth_locked += 1; |
| return KMP_LOCK_ACQUIRED_NEXT; |
| } |
| else { |
| __kmp_acquire_drdpa_lock_timed_template( lck, gtid ); |
| ANNOTATE_DRDPA_ACQUIRED(lck); |
| KMP_MB(); |
| lck->lk.depth_locked = 1; |
| KMP_MB(); |
| lck->lk.owner_id = gtid + 1; |
| return KMP_LOCK_ACQUIRED_FIRST; |
| } |
| } |
| |
| static int |
| __kmp_acquire_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_set_nest_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_acquire_nested_drdpa_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| int retval; |
| |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| if ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) { |
| retval = ++lck->lk.depth_locked; |
| } |
| else if ( !__kmp_test_drdpa_lock( lck, gtid ) ) { |
| retval = 0; |
| } |
| else { |
| KMP_MB(); |
| retval = lck->lk.depth_locked = 1; |
| KMP_MB(); |
| lck->lk.owner_id = gtid + 1; |
| } |
| return retval; |
| } |
| |
| static int |
| __kmp_test_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_test_nest_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| return __kmp_test_nested_drdpa_lock( lck, gtid ); |
| } |
| |
| int |
| __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| KMP_DEBUG_ASSERT( gtid >= 0 ); |
| |
| KMP_MB(); |
| if ( --(lck->lk.depth_locked) == 0 ) { |
| KMP_MB(); |
| lck->lk.owner_id = 0; |
| __kmp_release_drdpa_lock( lck, gtid ); |
| return KMP_LOCK_RELEASED; |
| } |
| return KMP_LOCK_STILL_HELD; |
| } |
| |
| static int |
| __kmp_release_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid ) |
| { |
| char const * const func = "omp_unset_nest_lock"; |
| KMP_MB(); /* in case another processor initialized lock */ |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_drdpa_lock_owner( lck ) == -1 ) { |
| KMP_FATAL( LockUnsettingFree, func ); |
| } |
| if ( __kmp_get_drdpa_lock_owner( lck ) != gtid ) { |
| KMP_FATAL( LockUnsettingSetByAnother, func ); |
| } |
| return __kmp_release_nested_drdpa_lock( lck, gtid ); |
| } |
| |
| void |
| __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t * lck ) |
| { |
| __kmp_init_drdpa_lock( lck ); |
| lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks |
| } |
| |
| static void |
| __kmp_init_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t * lck ) |
| { |
| __kmp_init_nested_drdpa_lock( lck ); |
| } |
| |
| void |
| __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck ) |
| { |
| __kmp_destroy_drdpa_lock( lck ); |
| lck->lk.depth_locked = 0; |
| } |
| |
| static void |
| __kmp_destroy_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck ) |
| { |
| char const * const func = "omp_destroy_nest_lock"; |
| if ( lck->lk.initialized != lck ) { |
| KMP_FATAL( LockIsUninitialized, func ); |
| } |
| if ( ! __kmp_is_drdpa_lock_nestable( lck ) ) { |
| KMP_FATAL( LockSimpleUsedAsNestable, func ); |
| } |
| if ( __kmp_get_drdpa_lock_owner( lck ) != -1 ) { |
| KMP_FATAL( LockStillOwned, func ); |
| } |
| __kmp_destroy_nested_drdpa_lock( lck ); |
| } |
| |
|