// Pad the given type out to an integral multiple of sz bytes.
#define KMP_PAD(type, sz)                                                      \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
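// Worked example (illustrative, assuming a 64-byte CACHE_LINE): for a type of
// size 40, KMP_PAD(type, 64) = 40 + (64 - ((40 - 1) % 64) - 1) = 40 + 24 = 64;
// for a type of size 72 it yields 72 + (64 - (71 % 64) - 1) = 72 + 56 = 128,
// i.e. the size rounded up to the next multiple of sz.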
// Global thread id value that means the gtid does not exist.
#define KMP_GTID_DNE (-2)
#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
#define OMP_LOCK_T_SIZE sizeof(void *)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif

// The Intel compiler allocates a 32-byte chunk for a critical section;
// gcc and Visual Studio only allocate enough space for a pointer.
#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32
// Lock flags.
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

// When a lock table is used, the indices are of kmp_lock_index_t.
typedef kmp_uint32 kmp_lock_index_t;

// When lock memory is on the free pool, it is chained through this structure.
struct kmp_lock_pool {
  union kmp_user_lock *next;
  kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;

extern void __kmp_validate_locks(void);
// Test-and-set locks.
struct kmp_base_tas_lock {
  // KMP_LOCK_FREE(tas) => unlocked; locked: (gtid + 1) of owning thread.
  std::atomic<kmp_int32> poll;
  kmp_int32 depth_locked; // depth locked, for nested locks only
};
typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
  kmp_base_tas_lock_t lk;
  kmp_lock_pool_t pool; // make certain the union is large enough
};
typedef union kmp_tas_lock kmp_tas_lock_t;

// Static initializer for test-and-set lock variables. Usage:
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER(xlock);
#define KMP_TAS_LOCK_INITIALIZER(lock)                                         \
  { {ATOMIC_VAR_INIT(KMP_LOCK_FREE(tas)), 0} }
extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);

extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);
#define KMP_LOCK_RELEASED 1
#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT 0
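// Illustrative usage sketch (not part of this header; `xlock` and `gtid` are
// hypothetical): the acquire/release entry points are expected to report their
// effect through the macros above.
//
//   kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER(xlock);
//   __kmp_acquire_tas_lock(&xlock, gtid); // KMP_LOCK_ACQUIRED_FIRST
//   /* ... critical work ... */
//   __kmp_release_tas_lock(&xlock, gtid); // KMP_LOCK_RELEASED
//
// For the nested variants, re-acquiring a lock already owned by gtid yields
// KMP_LOCK_ACQUIRED_NEXT, and releasing while the nesting depth is still
// positive yields KMP_LOCK_STILL_HELD.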
#define KMP_USE_FUTEX                                                          \
  (KMP_OS_LINUX &&                                                             \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
// Futex locks (Linux* OS only).
struct kmp_base_futex_lock {
  volatile kmp_int32 poll; // KMP_LOCK_FREE(futex) => unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
};
typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
  kmp_base_futex_lock_t lk;
  kmp_lock_pool_t pool; // make certain the union is large enough
};
typedef union kmp_futex_lock kmp_futex_lock_t;

// Static initializer for futex lock variables. Usage:
//    kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER(xlock);
#define KMP_FUTEX_LOCK_INITIALIZER(lock)                                       \
  { {KMP_LOCK_FREE(futex), 0} }
extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);

extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);
// Ticket (Lamport bakery) locks.
#ifdef __cplusplus

#ifdef _MSC_VER
// MSVC won't allow use of std::atomic<> in a union since it has non-trivial
// copy constructor.
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic_bool initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // source code location of omp_init_lock()
  std::atomic_uint next_ticket; // ticket number to give to next acquirer
  std::atomic_uint now_serving; // ticket number for thread holding the lock
  std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic_int depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#else
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic<bool> initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // source code location of omp_init_lock()
  std::atomic<unsigned> next_ticket; // ticket number to give to next acquirer
  std::atomic<unsigned> now_serving; // ticket number for thread holding the lock
  std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic<int> depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#endif

#else // __cplusplus
struct kmp_base_ticket_lock;
#endif // !__cplusplus

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
  kmp_base_ticket_lock_t lk; // must be first to allow static initializing
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};
typedef union kmp_ticket_lock kmp_ticket_lock_t;

// Static initializer for simple ticket lock variables. Usage:
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER(xlock);
#define KMP_TICKET_LOCK_INITIALIZER(lock)                                      \
  {                                                                            \
    {                                                                          \
      ATOMIC_VAR_INIT(true)                                                    \
      , &(lock), NULL, ATOMIC_VAR_INIT(0U), ATOMIC_VAR_INIT(0U),               \
          ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1)                              \
    }                                                                          \
  }
extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);

extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid);
extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);
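// Illustrative sketch (not part of this header; `xlock` and `gtid` are
// hypothetical) of the bakery scheme behind the declarations above: each
// acquiring thread atomically takes a ticket from next_ticket and waits until
// now_serving reaches it.
//
//   kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER(xlock);
//   __kmp_acquire_ticket_lock(&xlock, gtid); // ticket = next_ticket++;
//                                            // spin until now_serving == ticket
//   __kmp_release_ticket_lock(&xlock, gtid); // now_serving += 1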
#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
  // So we can get stats from locks that haven't been destroyed.
  kmp_adaptive_lock_info_t *next;
  kmp_adaptive_lock_info_t *prev;

  // Speculation statistics.
  kmp_uint32 successfulSpeculations;
  kmp_uint32 hardFailedSpeculations;
  kmp_uint32 softFailedSpeculations;
  kmp_uint32 nonSpeculativeAcquires;
  kmp_uint32 nonSpeculativeAcquireAttempts;
  kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS
struct kmp_adaptive_lock_info {
  // Values used for adaptivity. These are accessed from multiple threads
  // without atomics; occasional lost updates only perturb the heuristic.
  kmp_uint32 volatile badness;
  kmp_uint32 volatile acquire_attempts;
  // Parameters of the lock.
  kmp_uint32 max_badness;
  kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
  kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS
struct kmp_base_queuing_lock {
  // `initialized' must be the first entry in the lock data structure!
  volatile union kmp_queuing_lock
      *initialized; // points to the lock union if in initialized state
  ident_t const *location; // source code location of omp_init_lock()
  KMP_ALIGN(8) // tail_id must be 8-byte aligned!
  volatile kmp_int32 tail_id; // (gtid+1) of thread at tail of wait queue
  // No padding allowed here: head_id/tail_id are used in an 8-byte CAS.
  volatile kmp_int32 head_id; // (gtid+1) of thread at head of wait queue
  volatile kmp_uint32 next_ticket; // ticket number to give to next acquirer
  volatile kmp_uint32 now_serving; // ticket number for thread holding the lock
  volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);

union KMP_ALIGN_CACHE kmp_queuing_lock {
  kmp_base_queuing_lock_t lk; // must be first to allow static initializing
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;
extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);

extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid);
extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);
#if KMP_USE_ADAPTIVE_LOCKS

// An adaptive lock is a queuing lock with speculation bookkeeping attached.
struct kmp_base_adaptive_lock {
  kmp_base_queuing_lock qlk;
  KMP_ALIGN(CACHE_LINE)
  kmp_adaptive_lock_info_t adaptive; // information for the speculative lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
  kmp_base_adaptive_lock_t lk;
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;

#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)

#endif // KMP_USE_ADAPTIVE_LOCKS
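// Illustrative note (hypothetical variable `adaptive_lck`): because an
// adaptive lock embeds a queuing lock as its first member, code that only
// needs the non-speculative path can operate on the underlying queue, e.g.
//   __kmp_acquire_queuing_lock(GET_QLK_PTR(&adaptive_lck), gtid);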
struct kmp_base_drdpa_lock {
  // All fields on the first cache line are only written when initializing or
  // reconfiguring the lock; `initialized' must be the first entry!
  volatile union kmp_drdpa_lock
      *initialized; // points to the lock union if in initialized state
  ident_t const *location; // source code location of omp_init_lock()
  std::atomic<std::atomic<kmp_uint64> *> polls;
  std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
  std::atomic<kmp_uint64> *old_polls; // will deallocate old_polls
  kmp_uint32 num_polls; // must be power of 2

  // next_ticket lives apart from the fields above, as it is invalidated every
  // time a thread takes a new ticket.
  KMP_ALIGN_CACHE
  std::atomic<kmp_uint64> next_ticket;

  // now_serving stores our ticket value while we hold the lock.
  KMP_ALIGN_CACHE
  kmp_uint64 now_serving; // doesn't have to be volatile
  volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
  kmp_base_drdpa_lock_t lk; // must be first to allow static initializing
  kmp_lock_pool_t pool;
  char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);

extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
// Bootstrap locks -- available at library startup/shutdown, before valid
// gtids exist. Currently implemented as ticket locks that pass KMP_GTID_DNE.
typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))
#define KMP_BOOTSTRAP_LOCK_INIT(lock)                                          \
  kmp_bootstrap_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}

static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
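// Illustrative sketch (not part of this header; `init_lock`, `initialized`
// and the setup body are hypothetical): a bootstrap lock guarding one-time
// initialization before valid gtids are available.
//
//   static kmp_bootstrap_lock_t init_lock =
//       KMP_BOOTSTRAP_LOCK_INITIALIZER(init_lock);
//   static int initialized = 0;
//
//   static void ensure_initialized(void) {
//     __kmp_acquire_bootstrap_lock(&init_lock);
//     if (!initialized) {
//       /* ... one-time setup ... */
//       initialized = 1;
//     }
//     __kmp_release_bootstrap_lock(&init_lock);
//   }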
// Internal RTL locks -- also implemented as ticket locks, for now.
typedef kmp_ticket_lock_t kmp_lock_t;

#define KMP_LOCK_INIT(lock) kmp_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(lck, gtid);
}

static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_test_ticket_lock(lck, gtid);
}

static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  __kmp_release_ticket_lock(lck, gtid);
}

static inline void __kmp_init_lock(kmp_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
// User locks.
enum kmp_lock_kind {
  lk_default = 0,
  lk_tas,
#if KMP_USE_FUTEX
  lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
  lk_hle, lk_rtm_queuing, lk_rtm_spin,
#endif
  lk_ticket, lk_queuing, lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
  lk_adaptive
#endif
};
typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
  kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
  kmp_futex_lock_t futex;
#endif
  kmp_ticket_lock_t ticket;
  kmp_queuing_lock_t queuing;
  kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
  kmp_adaptive_lock_t adaptive;
#endif
  kmp_lock_pool_t pool;
};
typedef union kmp_user_lock *kmp_user_lock_p;
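// Illustrative sketch (hypothetical helper, not part of this header): a
// kmp_user_lock_p is viewed through whichever union member matches
// __kmp_user_lock_kind, e.g.
//
//   kmp_int32 owner_of(kmp_user_lock_p lck) {
//     if (__kmp_user_lock_kind == lk_ticket)
//       return lck->ticket.lk.owner_id; // the ticket view is live
//     return __kmp_get_user_lock_owner(lck); // otherwise use the vtable below
//   }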
#if !KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);

static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
  return (*__kmp_get_user_lock_owner_)(lck);
}
extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck, gtid)                         \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_lock";                                 \
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&                       \
          lck->tas.lk.depth_locked != -1) {                                    \
        KMP_FATAL(LockNestableUsedAsSimple, func);                             \
      }                                                                        \
      if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) {                     \
        KMP_FATAL(LockIsAlreadyOwned, func);                                   \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll != 0 ||                                               \
        !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) {     \
      kmp_uint32 spins;                                                        \
      kmp_uint64 time;                                                         \
      KMP_FSYNC_PREPARE(lck);                                                  \
      KMP_INIT_YIELD(spins);                                                   \
      KMP_INIT_BACKOFF(time);                                                  \
      do {                                                                     \
        KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);                              \
      } while (                                                                \
          lck->tas.lk.poll != 0 ||                                             \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));    \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);            \
    (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);                        \
  }

#else
static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
                                                      kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif
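// Worked example of the TAS poll encoding assumed above (illustrative):
// poll == 0 means unlocked and poll == gtid + 1 means the thread with global
// id gtid owns the lock. For gtid == 5 a successful compare-and-store writes
// 6 into poll, and the ownership check `poll - 1 == gtid` compares 5 == 5.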
extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

extern int __kmp_env_consistency_check; // copy from kmp.h
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked != -1) {
        KMP_FATAL(LockNestableUsedAsSimple, func);
      }
    }
    return ((lck->tas.lk.poll == 0) &&
            __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
    return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
  return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
                                                       kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
  (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
  (*__kmp_init_user_lock_with_checks_)(lck);
}

// A non-checking destroy is needed when the RTL does the cleanup itself and
// cannot always tell whether the lock is nested or not.
extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
  (*__kmp_destroy_user_lock_)(lck);
}

extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_user_lock_with_checks_)(lck);
}
extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth)           \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_nest_lock";                            \
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&                  \
          lck->tas.lk.depth_locked == -1) {                                    \
        KMP_FATAL(LockSimpleUsedAsNestable, func);                             \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll - 1 == gtid) {                                        \
      lck->tas.lk.depth_locked += 1;                                           \
      *depth = KMP_LOCK_ACQUIRED_NEXT;                                         \
    } else {                                                                   \
      if ((lck->tas.lk.poll != 0) ||                                           \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) {   \
        kmp_uint32 spins;                                                      \
        kmp_uint64 time;                                                       \
        KMP_FSYNC_PREPARE(lck);                                                \
        KMP_INIT_YIELD(spins);                                                 \
        KMP_INIT_BACKOFF(time);                                                \
        do {                                                                   \
          KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);                            \
        } while (                                                              \
            (lck->tas.lk.poll != 0) ||                                         \
            !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));  \
      }                                                                        \
      lck->tas.lk.depth_locked = 1;                                            \
      *depth = KMP_LOCK_ACQUIRED_FIRST;                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);     \
    *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);        \
  }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
                                           int *depth) {
  KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
  *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                       kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    int retval;
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_nest_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked == -1) {
        KMP_FATAL(LockSimpleUsedAsNestable, func);
      }
    }
    KMP_DEBUG_ASSERT(gtid >= 0);
    if (lck->tas.lk.poll - 1 == gtid) { // calling thread already owns the lock
      return ++lck->tas.lk.depth_locked; // same owner, depth increased
    }
    retval = ((lck->tas.lk.poll == 0) &&
              __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
    if (retval) {
      KMP_MB();
      lck->tas.lk.depth_locked = 1;
    }
    return retval;
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
    return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

static inline int
__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                           kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
  (*__kmp_init_nested_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}
// Lock accessor routines that do not necessarily exist for all lock kinds.
extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);

extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);

static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
  if (__kmp_get_user_lock_location_ != NULL) {
    return (*__kmp_get_user_lock_location_)(lck);
  } else {
    return NULL;
  }
}

extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                             const ident_t *loc);

static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
                                                const ident_t *loc) {
  if (__kmp_set_user_lock_location_ != NULL) {
    (*__kmp_set_user_lock_location_)(lck, loc);
  }
}

extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);

extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                          kmp_lock_flags_t flags);

static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
                                             kmp_lock_flags_t flags) {
  if (__kmp_set_user_lock_flags_ != NULL) {
    (*__kmp_set_user_lock_flags_)(lck, flags);
  }
}

// Sets up all of the lock function pointers for the given user lock kind.
extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix)                        \
  {                                                                            \
    __kmp_acquire##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_ = (int (*)(                       \
        kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_ =                                 \
        (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix;          \
    __kmp_destroy##nest##user_lock_with_checks_ =                              \
        (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix;       \
  }

#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)                                   \
  KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)                                        \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind)                            \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
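// Illustrative expansion (using the ticket kind as an example):
// KMP_BIND_USER_LOCK(ticket) invokes KMP_BIND_USER_LOCK_TEMPLATE(_, ticket,
// lock), which assigns, among others,
//   __kmp_acquire_user_lock_with_checks_ =
//       (int (*)(kmp_user_lock_p, kmp_int32))__kmp_acquire_ticket_lock;
// and likewise binds the release/test/init/destroy pointers.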
// Table of user locks, used when the compiler-allocated omp_lock_t space is
// too small to hold a pointer to the lock object directly.
struct kmp_lock_table {
  kmp_lock_index_t used; // number of used elements
  kmp_lock_index_t allocated; // number of allocated elements
  kmp_user_lock_p *table; // maps index to user lock
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
  struct kmp_block_of_locks *next_block;
  void *locks; // first lock in the block located here
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;
extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
                                                kmp_int32 gtid,
                                                kmp_lock_flags_t flags);
extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                                 kmp_user_lock_p lck);
extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
                                              char const *func);
extern void __kmp_cleanup_user_locks();
#define KMP_CHECK_USER_LOCK_INIT()                                             \
  {                                                                            \
    if (!TCR_4(__kmp_init_user_locks)) {                                       \
      __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);                         \
      if (!TCR_4(__kmp_init_user_locks)) {                                     \
        TCW_4(__kmp_init_user_locks, TRUE);                                    \
      }                                                                        \
      __kmp_release_bootstrap_lock(&__kmp_initz_lock);                         \
    }                                                                          \
  }

#endif // KMP_USE_DYNAMIC_LOCK
#if KMP_USE_DYNAMIC_LOCK

#define KMP_USE_INLINED_TAS                                                    \
  (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0
// List of lock definitions; all nested locks are indirect locks.
#if KMP_USE_TSX
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a) m(rtm_spin, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm_queuing, a)      \
      m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a)                  \
          m(nested_queuing, a) m(nested_drdpa, a)
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a) m(rtm_spin, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm_queuing, a)      \
      m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a)                \
          m(nested_drdpa, a)
#endif // KMP_USE_FUTEX
#define KMP_LAST_D_LOCK lockseq_rtm_spin
#else
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a)   \
      m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_futex
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a)  \
      m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_tas
#endif // KMP_USE_FUTEX
#endif // KMP_USE_TSX
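// Illustrative expansion (minimal configuration, i.e. no TSX and no futex):
// KMP_FOREACH_D_LOCK(expand_seq, 0) with expand_seq(l, a) defined as
// lockseq_##l produces just `lockseq_tas,`, so the kmp_dyna_lockseq_t enum
// declared below starts { lockseq_indirect = 0, lockseq_tas, lockseq_ticket,
// lockseq_queuing, ... }.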
// Information used in dynamic dispatch.
#define KMP_LOCK_SHIFT                                                         \
  8 // number of low bits used as the tag for direct locks
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
#define KMP_LAST_I_LOCK lockseq_nested_drdpa
#define KMP_NUM_I_LOCKS                                                        \
  (locktag_nested_drdpa + 1) // number of indirect lock types
// Word type for dynamically dispatched (direct) locks.
typedef kmp_uint32 kmp_dyna_lock_t;

// Enumerates lock sequences (lock types).
typedef enum {
  lockseq_indirect = 0,
#define expand_seq(l, a) lockseq_##l,
  KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l, a) locktag_##l,
  KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;
// Validates a lock sequence value against the direct/indirect ranges.
#define KMP_IS_D_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
#define KMP_IS_I_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)

// Enumerates direct lock tags, derived from the corresponding lock sequences.
typedef enum {
#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
  KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;
// An indirect lock: a pointer to the actual lock object plus its tag.
typedef struct {
  kmp_user_lock_p lock;
  kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;
// Function tables for direct locks, indexed by the direct lock tag.
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (**__kmp_direct_destroy)(kmp_dyna_lock_t *);
extern int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32);

// Function tables for indirect locks, indexed by the indirect lock tag.
extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (**__kmp_indirect_destroy)(kmp_user_lock_p);
extern int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32);
// Extracts the direct lock tag from a user lock pointer.
#define KMP_EXTRACT_D_TAG(l)                                                   \
  (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) &                   \
   -(*((kmp_dyna_lock_t *)(l)) & 1))

// Extracts the indirect lock index from a user lock pointer.
#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)

// Returns the function for the direct lock l and operation op.
#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]

// Returns the function for the indirect lock l and operation op.
#define KMP_I_LOCK_FUNC(l, op)                                                 \
  __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]

// Initializes a direct lock with the given lock pointer and lock sequence.
#define KMP_INIT_D_LOCK(l, seq)                                                \
  __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

// Initializes an indirect lock with the given lock pointer and lock sequence.
#define KMP_INIT_I_LOCK(l, seq)                                                \
  __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)

// Returns the "free" lock value for the given lock type.
#define KMP_LOCK_FREE(type) (locktag_##type)

// Returns the "busy" lock value: v shifted above the tag for the lock type.
#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)

// Strips the lock tag, recovering the value stored by KMP_LOCK_BUSY.
#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)
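// Worked example (illustrative): direct lock tags are odd because
// KMP_GET_D_TAG(seq) is (seq << 1 | 1). An unlocked direct word holds
// KMP_LOCK_FREE(type), i.e. just the tag; KMP_LOCK_BUSY(gtid + 1, tas) places
// gtid + 1 above the low KMP_LOCK_SHIFT tag bits, and KMP_LOCK_STRIP() shifts
// the tag away to recover gtid + 1.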
// Initializes the global state for managing dynamic user locks.
extern void __kmp_init_dynamic_user_locks();

// Allocates and returns an indirect lock with the given indirect lock tag.
extern kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);

// Cleans up the global state for managing dynamic user locks.
extern void __kmp_cleanup_indirect_user_locks();

// Default user lock sequence.
extern kmp_dyna_lockseq_t __kmp_user_lock_seq;
// Jump table for "set lock location", available only for indirect locks.
extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                            const ident_t *);
#define KMP_SET_I_LOCK_LOCATION(lck, loc)                                      \
  {                                                                            \
    if (__kmp_indirect_set_location[(lck)->type] != NULL)                      \
      __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc);              \
  }

// Jump table for "set lock flags", available only for indirect locks.
extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                         kmp_lock_flags_t);
#define KMP_SET_I_LOCK_FLAGS(lck, flag)                                        \
  {                                                                            \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL)                         \
      __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag);                \
  }

// Jump table for "get lock location", available only for indirect locks.
extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_LOCATION(lck)                                           \
  (__kmp_indirect_get_location[(lck)->type] != NULL                            \
       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock)                 \
       : NULL)

// Jump table for "get lock flags", available only for indirect locks.
extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_FLAGS(lck)                                              \
  (__kmp_indirect_get_flags[(lck)->type] != NULL                               \
       ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock)                    \
       : NULL)
#define KMP_I_LOCK_CHUNK 1024
// The chunk size must stay even for the row/column arithmetic below.
KMP_BUILD_ASSERT(KMP_I_LOCK_CHUNK % 2 == 0);

#define KMP_I_LOCK_TABLE_INIT_NROW_PTRS 8

// Lock table for indirect locks.
typedef struct kmp_indirect_lock_table {
  kmp_indirect_lock_t **table; // rows of indirect locks, KMP_I_LOCK_CHUNK each
  kmp_uint32 nrow_ptrs; // number of row pointers allocated
  kmp_lock_index_t next; // index of the next lock to be allocated
  struct kmp_indirect_lock_table *next_table;
} kmp_indirect_lock_table_t;

extern kmp_indirect_lock_table_t __kmp_i_lock_table;
// Returns the indirect lock associated with the given index, or nullptr if
// there is no lock at that index.
static inline kmp_indirect_lock_t *__kmp_get_i_lock(kmp_lock_index_t idx) {
  kmp_indirect_lock_table_t *lock_table = &__kmp_i_lock_table;
  while (lock_table) {
    kmp_lock_index_t max_locks = lock_table->nrow_ptrs * KMP_I_LOCK_CHUNK;
    if (idx < max_locks) {
      kmp_lock_index_t row = idx / KMP_I_LOCK_CHUNK;
      kmp_lock_index_t col = idx % KMP_I_LOCK_CHUNK;
      if (!lock_table->table[row] || idx >= lock_table->next)
        break;
      return &lock_table->table[row][col];
    }
    idx -= max_locks;
    lock_table = lock_table->next_table;
  }
  return nullptr;
}

extern int __kmp_num_locks_in_block;
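// Worked example (illustrative): with KMP_I_LOCK_CHUNK == 1024, index 2050
// maps to row 2050 / 1024 == 2 and column 2050 % 1024 == 2 of the first table
// whose capacity (nrow_ptrs * KMP_I_LOCK_CHUNK) covers it; larger indices move
// on to next_table after subtracting that capacity.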
// Fast lock table lookup without consistency checking.
#define KMP_LOOKUP_I_LOCK(l)                                                   \
  ((OMP_LOCK_T_SIZE < sizeof(void *))                                          \
       ? __kmp_get_i_lock(KMP_EXTRACT_I_INDEX(l))                              \
       : *((kmp_indirect_lock_t **)(l)))

extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);
#else // KMP_USE_DYNAMIC_LOCK

#define KMP_LOCK_BUSY(v, type) (v)
#define KMP_LOCK_FREE(type) 0
#define KMP_LOCK_STRIP(v) (v)

#endif // KMP_USE_DYNAMIC_LOCK
// Data structure for exponential backoff within spin locks.
typedef struct {
  kmp_uint32 step; // current step
  kmp_uint32 max_backoff; // upper bound of the outer delay loop
  kmp_uint32 min_tick; // size of the inner delay loop in ticks
} kmp_backoff_t;

extern kmp_backoff_t __kmp_spin_backoff_params;
extern void __kmp_spin_backoff(kmp_backoff_t *);
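// Illustrative usage sketch (the acquire attempt is hypothetical): a spin loop
// typically copies the default parameters and backs off between attempts, e.g.
//
//   kmp_backoff_t backoff = __kmp_spin_backoff_params;
//   while (!try_acquire(lck))   // hypothetical acquisition attempt
//     __kmp_spin_backoff(&backoff);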