References

projects/compiler-rt/lib/tsan/rtl/tsan_interface.h
  221 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
  223 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
  225 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
  227 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
  230 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
  234 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
  236 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
  238 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
  240 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
  243 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
  247 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
  249 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
  251 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
  253 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
  256 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
  260 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
  262 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
  264 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
  266 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
  269 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
  273 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
  275 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
  277 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
  279 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
  282 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
  286 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
  288 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
  290 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
  292 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
  295 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
  299 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
  301 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
  303 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
  305 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
  308 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
  312 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
  314 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
  316 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
  318 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
  321 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
  325 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
  327 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
  329 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
  331 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
  334 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
  339                                            morder mo, morder fmo);
  339                                            morder mo, morder fmo);
  342                                             morder mo, morder fmo);
  342                                             morder mo, morder fmo);
  345                                             morder mo, morder fmo);
  345                                             morder mo, morder fmo);
  348                                             morder mo, morder fmo);
  348                                             morder mo, morder fmo);
  352                                              morder mo, morder fmo);
  352                                              morder mo, morder fmo);
  356 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
  357                                          morder fmo);
  360                                           morder mo, morder fmo);
  360                                           morder mo, morder fmo);
  363                                           morder mo, morder fmo);
  363                                           morder mo, morder fmo);
  366                                           morder mo, morder fmo);
  366                                           morder mo, morder fmo);
  370                                            morder mo, morder fmo);
  370                                            morder mo, morder fmo);
  374 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
  375                                        morder fmo);
  378                                          morder mo, morder fmo);
  378                                          morder mo, morder fmo);
  381                                          morder mo, morder fmo);
  381                                          morder mo, morder fmo);
  384                                          morder mo, morder fmo);
  384                                          morder mo, morder fmo);
  388                                            morder mo, morder fmo);
  388                                            morder mo, morder fmo);
  392 void __tsan_atomic_thread_fence(morder mo);
  394 void __tsan_atomic_signal_fence(morder mo);
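
The declarations above are the runtime entry points that ThreadSanitizer-instrumented code calls in place of native atomic operations, with the requested memory order passed as a morder argument. The following is a minimal, hedged sketch of what an instrumented 32-bit acquire load and release store reduce to; the local typedefs and the numeric morder values (assumed to mirror C++11 std::memory_order, 0 = relaxed through 5 = seq_cst) are illustrative assumptions, not copied from tsan_interface.h.

  // Hedged illustration: the typedefs below are assumptions for this sketch;
  // the authoritative a32/morder definitions live in tsan_interface.h.
  typedef int morder;        // assumed: enum with relaxed=0 ... seq_cst=5
  typedef unsigned int a32;  // assumed 32-bit atomic cell type

  extern "C" a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
  extern "C" void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);

  static volatile a32 g_flag;

  // std::atomic<unsigned>::load(std::memory_order_acquire) is instrumented
  // into roughly this call (mo = 2 assumed to mean acquire):
  unsigned read_flag() { return __tsan_atomic32_load(&g_flag, 2); }

  // std::atomic<unsigned>::store(1, std::memory_order_release) becomes
  // roughly this call (mo = 3 assumed to mean release):
  void set_flag() { __tsan_atomic32_store(&g_flag, 1, 3); }
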
projects/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
   35 static bool IsLoadOrder(morder mo) {
   40 static bool IsStoreOrder(morder mo) {
   44 static bool IsReleaseOrder(morder mo) {
   48 static bool IsAcquireOrder(morder mo) {
   53 static bool IsAcqRelOrder(morder mo) {
  196 static memory_order to_mo(morder mo) {
  210 static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  215 static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  222 static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  246 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  251 static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  259     morder mo) {
  281 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  303 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  308 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  313 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  318 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  323 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  328 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  333 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  339     morder mo) {
  345     morder mo) {
  351     morder mo) {
  357     morder mo) {
  363     morder mo) {
  369     morder mo) {
  375     morder mo) {
  380 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  380 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  386     morder mo, morder fmo) {
  386     morder mo, morder fmo) {
  397 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  397 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  404     volatile T *a, T *c, T v, morder mo, morder fmo) {
  404     volatile T *a, T *c, T v, morder mo, morder fmo) {
  437     volatile T *a, T c, T v, morder mo, morder fmo) {
  437     volatile T *a, T c, T v, morder mo, morder fmo) {
  443 static void NoTsanAtomicFence(morder mo) {
  447 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  458 static morder convert_morder(morder mo) {
  458 static morder convert_morder(morder mo) {
  492                morder mo, const char *func)
  505 static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  523 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  528 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  533 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  538 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  544 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  550 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  555 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  560 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  565 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  571 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  577 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  582 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  587 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  592 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  598 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  604 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  609 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  614 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  619 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  625 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  631 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  636 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  641 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  646 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  652 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  658 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  663 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  668 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  673 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  679 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  685 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  690 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  695 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  700 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  706 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  712 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  717 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  722 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  727 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  733 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  739 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  744 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  749 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  754 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  760 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  767     morder mo, morder fmo) {
  767     morder mo, morder fmo) {
  773     morder mo, morder fmo) {
  773     morder mo, morder fmo) {
  779     morder mo, morder fmo) {
  779     morder mo, morder fmo) {
  785     morder mo, morder fmo) {
  785     morder mo, morder fmo) {
  792     morder mo, morder fmo) {
  792     morder mo, morder fmo) {
  799     morder mo, morder fmo) {
  799     morder mo, morder fmo) {
  805     morder mo, morder fmo) {
  805     morder mo, morder fmo) {
  811     morder mo, morder fmo) {
  811     morder mo, morder fmo) {
  817     morder mo, morder fmo) {
  817     morder mo, morder fmo) {
  824     morder mo, morder fmo) {
  824     morder mo, morder fmo) {
  831     morder mo, morder fmo) {
  831     morder mo, morder fmo) {
  837     morder mo, morder fmo) {
  837     morder mo, morder fmo) {
  843     morder mo, morder fmo) {
  843     morder mo, morder fmo) {
  849     morder mo, morder fmo) {
  849     morder mo, morder fmo) {
  856     morder mo, morder fmo) {
  856     morder mo, morder fmo) {
  862 void __tsan_atomic_thread_fence(morder mo) {
  868 void __tsan_atomic_signal_fence(morder mo) {
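
The compare_exchange entry points referenced above take two orders: mo for the successful exchange and fmo for the failure (load-only) path, matching C++11 compare_exchange semantics; the AtomicCAS and NoTsanAtomicCAS helpers in tsan_interface_atomic.cpp receive both. Below is a hedged sketch of a call site for the 32-bit strong variant, which follows the same signature pattern as the 8-bit declarations shown in the header listing; the typedefs and the numeric morder values (5 = seq_cst, 2 = acquire) are assumptions for illustration only.

  typedef int morder;        // assumed numbering mirrors std::memory_order
  typedef unsigned int a32;  // assumed 32-bit atomic cell type

  extern "C" int __tsan_atomic32_compare_exchange_strong(
      volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo);

  static volatile a32 g_state;

  // atomic_compare_exchange_strong_explicit(&g_state, &expected, desired,
  //     memory_order_seq_cst, memory_order_acquire) is instrumented into
  // roughly this call; on failure the observed value is written back to *c.
  bool try_transition(a32 expected, a32 desired) {
    return __tsan_atomic32_compare_exchange_strong(
               &g_state, &expected, desired, /*mo=*/5, /*fmo=*/2) != 0;
  }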