References

projects/compiler-rt/lib/tsan/rtl/tsan_interface.h
  227 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
  240 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
  253 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
  266 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
  279 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
  292 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
  305 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
  318 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
  331 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
  347 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
  365 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
  383 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
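Every entry above involves the a64 type, so this listing reads as the cross-reference for a64, presumably a 64-bit integer typedef; the declarations in tsan_interface.h form the 64-bit slice of the TSan atomic interface. Below is a minimal usage sketch exercising only the prototypes whose full signatures appear in the listing (load, store, exchange, fetch_add). The a64 typedef and the mo_* memory-order constants are assumptions modelled on the C11 memory orders, not copied from the listing; a real client would include tsan_interface.h and link against the TSan runtime, and under -fsanitize=thread the compiler normally emits these calls itself, so hand-written calls are purely illustrative.

  #include <cstdio>

  typedef long long a64;                        // assumed 64-bit typedef
  typedef enum {                                // assumed layout, C11-style orders
    mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst
  } morder;

  extern "C" {
  // Prototypes copied from the tsan_interface.h lines listed above.
  a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
  void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
  a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
  a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
  }

  volatile a64 g_counter = 0;

  void demo() {
    __tsan_atomic64_store(&g_counter, 0, mo_release);                // release store
    a64 prev = __tsan_atomic64_fetch_add(&g_counter, 1, mo_acq_rel); // returns the old value
    a64 old  = __tsan_atomic64_exchange(&g_counter, 42, mo_seq_cst); // swaps, returns the old value
    std::printf("prev=%lld old=%lld now=%lld\n", prev, old,
                __tsan_atomic64_load(&g_counter, mo_acquire));
  }
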
projects/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
  192 static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  538 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  565 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  592 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  619 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  646 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  673 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  700 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  727 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  754 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  784 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
  816 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
  848 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
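The definitions in tsan_interface_atomic.cpp group into a plain load and store, read-modify-write operations (exchange and the fetch_* family), and three compare-exchange flavours (strong, weak, and a value-returning variant). The value-returning variant alone is enough to express any of the fetch_* operations, as the sketch below shows. Caveats: the listing truncates the compare-exchange declarations, so the trailing success/failure memory-order parameters (written mo and fmo here) are an assumption, as is the convention that __tsan_atomic64_compare_exchange_val returns the value actually observed at *a; the a64 and morder definitions are the same assumptions as in the earlier sketch.

  typedef long long a64;                        // assumed, as above
  typedef enum {
    mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst
  } morder;                                     // assumed, as above

  extern "C" a64 __tsan_atomic64_compare_exchange_val(
      volatile a64 *a, a64 c, a64 v,
      morder mo, morder fmo);                   // trailing parameters assumed (truncated in the listing)

  // fetch_or expressed as a compare-exchange retry loop; behaviourally this
  // should match calling __tsan_atomic64_fetch_or directly.
  a64 fetch_or_via_cas(volatile a64 *a, a64 bits, morder mo) {
    a64 expected = *a;                          // first guess; the loop corrects it
    for (;;) {
      a64 observed = __tsan_atomic64_compare_exchange_val(
          a, expected, expected | bits, mo, mo_relaxed);
      if (observed == expected)
        return observed;                        // swap happened: previous value returned
      expected = observed;                      // lost the race: retry with the fresh value
    }
  }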