References

projects/openmp/runtime/src/kmp_lock.cpp
 2225   return lck->lk.owner_id - 1;
 2229   return lck->lk.depth_locked != -1;
 2234   kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
 2235   kmp_uint64 mask = lck->lk.mask; // atomic load
 2236   std::atomic<kmp_uint64> *polls = lck->lk.polls;
 2263     mask = lck->lk.mask; // atomic load
 2264     polls = lck->lk.polls; // atomic load
 2271   lck->lk.now_serving = ticket; // non-volatile store
 2278   if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
 2279     __kmp_free(lck->lk.old_polls);
 2280     lck->lk.old_polls = NULL;
 2281     lck->lk.cleanup_ticket = 0;
 2287   if (lck->lk.old_polls == NULL) {
 2290     kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
 2298         num_polls = TCR_4(lck->lk.num_polls);
 2309       kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
 2345       lck->lk.old_polls = old_polls;
 2346       lck->lk.polls = polls; // atomic store
 2350       lck->lk.num_polls = num_polls;
 2351       lck->lk.mask = mask; // atomic store
 2359       lck->lk.cleanup_ticket = lck->lk.next_ticket;
 2374   if (lck->lk.initialized != lck) {
 2386   lck->lk.owner_id = gtid + 1;
 2393   kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
 2394   std::atomic<kmp_uint64> *polls = lck->lk.polls;
 2395   kmp_uint64 mask = lck->lk.mask; // atomic load
 2398     if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
 2403       lck->lk.now_serving = ticket; // non-volatile store
 2420   if (lck->lk.initialized != lck) {
 2430     lck->lk.owner_id = gtid + 1;
 2438   kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
 2439   std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
 2440   kmp_uint64 mask = lck->lk.mask; // atomic load
 2453   if (lck->lk.initialized != lck) {
 2466   lck->lk.owner_id = 0;
 2471   lck->lk.location = NULL;
 2472   lck->lk.mask = 0;
 2473   lck->lk.num_polls = 1;
 2474   lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
 2475       lck->lk.num_polls * sizeof(*(lck->lk.polls)));
 2476   lck->lk.cleanup_ticket = 0;
 2477   lck->lk.old_polls = NULL;
 2478   lck->lk.next_ticket = 0;
 2479   lck->lk.now_serving = 0;
 2480   lck->lk.owner_id = 0; // no thread owns the lock.
 2481   lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
 2482   lck->lk.initialized = lck;
 2488   lck->lk.initialized = NULL;
 2489   lck->lk.location = NULL;
 2490   if (lck->lk.polls.load() != NULL) {
 2491     __kmp_free(lck->lk.polls.load());
 2492     lck->lk.polls = NULL;
 2494   if (lck->lk.old_polls != NULL) {
 2495     __kmp_free(lck->lk.old_polls);
 2496     lck->lk.old_polls = NULL;
 2498   lck->lk.mask = 0;
 2499   lck->lk.num_polls = 0;
 2500   lck->lk.cleanup_ticket = 0;
 2501   lck->lk.next_ticket = 0;
 2502   lck->lk.now_serving = 0;
 2503   lck->lk.owner_id = 0;
 2504   lck->lk.depth_locked = -1;
 2509   if (lck->lk.initialized != lck) {
 2527     lck->lk.depth_locked += 1;
 2533     lck->lk.depth_locked = 1;
 2535     lck->lk.owner_id = gtid + 1;
 2543   if (lck->lk.initialized != lck) {
 2558     retval = ++lck->lk.depth_locked;
 2563     retval = lck->lk.depth_locked = 1;
 2565     lck->lk.owner_id = gtid + 1;
 2573   if (lck->lk.initialized != lck) {
 2586   if (--(lck->lk.depth_locked) == 0) {
 2588     lck->lk.owner_id = 0;
 2599   if (lck->lk.initialized != lck) {
 2616   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
 2621   lck->lk.depth_locked = 0;
 2626   if (lck->lk.initialized != lck) {
 2641   return lck->lk.location;
 2646   lck->lk.location = loc;
 2650   return lck->lk.flags;
 2655   lck->lk.flags = flags;
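
Taken together, these references trace a ticket-style lock with a distributed polling area: acquire atomically increments lk.next_ticket, the waiter spins on polls[ticket & mask], the owner records now_serving with a plain store, and release publishes the next ticket into its poll slot; the old_polls / cleanup_ticket lines belong to the poll-array resizing path, and depth_locked / owner_id to nested-lock bookkeeping. The sketch below is a minimal, self-contained illustration of that acquire/release pattern only. The struct and function names are hypothetical (not the kmp API), the poll array is fixed-size, and resizing, cleanup, owner_id, and nesting are all omitted.

// Minimal sketch of the ticket-plus-poll-array pattern seen above.
// All names here are assumed for illustration, not taken from kmp_lock.h.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

struct drdpa_like_lock {                         // hypothetical type
  std::atomic<std::uint64_t> next_ticket{0};     // analogous to lk.next_ticket
  std::uint64_t now_serving = 0;                 // analogous to lk.now_serving (plain store by owner)
  std::uint64_t mask;                            // num_polls - 1, num_polls a power of two
  std::vector<std::atomic<std::uint64_t>> polls; // fixed size here; the real lock resizes it

  explicit drdpa_like_lock(std::size_t num_polls)
      : mask(num_polls - 1), polls(num_polls) {} // value-initialized slots start at 0

  void acquire() {
    std::uint64_t ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    // Each waiter spins on its own poll slot instead of one shared word.
    while (polls[ticket & mask].load(std::memory_order_acquire) < ticket)
      std::this_thread::yield();
    now_serving = ticket; // only the lock owner writes this, so a plain store suffices
  }

  void release() {
    std::uint64_t next = now_serving + 1;
    // Publish the next ticket in its slot; the matching waiter stops spinning.
    polls[next & mask].store(next, std::memory_order_release);
  }
};

int main() {
  drdpa_like_lock lock(4); // 4 poll slots, mask == 3
  std::uint64_t counter = 0;
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t)
    threads.emplace_back([&] {
      for (int i = 0; i < 10000; ++i) {
        lock.acquire();
        ++counter; // protected by the lock
        lock.release();
      }
    });
  for (std::thread &th : threads)
    th.join();
  std::printf("counter = %llu\n", (unsigned long long)counter); // expect 40000
}

Spreading waiters across several poll slots keeps spinning threads off a single shared location; with one slot the sketch degenerates into an ordinary ticket lock, which is the starting configuration in the initialization lines above (num_polls = 1, mask = 0).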