// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s

// FIXME: This file should not be checking -O1 output.
// I.e., it is testing many IR optimizer passes as part of front-end verification.

enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};
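
// The enumerator values above follow the C11 numbering (memory_order_relaxed
// == 0 through memory_order_seq_cst == 5). The __atomic_* libcalls take the
// ordering as a plain i32 argument, which is why the CHECK lines below expect
// "i32 5" for seq_cst and "i32 0" for relaxed.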

int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_add_int_ptr
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int_ptr
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
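
// For the two pointer cases above, the __c11_atomic_fetch_add/sub builtins do
// ordinary pointer arithmetic on the stored int *, so the operand is scaled by
// sizeof(int) == 4: 3 shows up as "i32 12" and 5 as "i32 20" in the libcalls.
// Contrast this with the plain __atomic_* builtin in fp2a below, which passes
// the operand through unscaled.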

int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_add_int
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

int test_atomic_fetch_add(int *p) {
  // CHECK: test_atomic_fetch_add
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_sub(int *p) {
  // CHECK: test_atomic_fetch_sub
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_and(int *p) {
  // CHECK: test_atomic_fetch_and
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_or(int *p) {
  // CHECK: test_atomic_fetch_or
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_xor(int *p) {
  // CHECK: test_atomic_fetch_xor
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_nand(int *p) {
  // CHECK: test_atomic_fetch_nand
  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}
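
// The *_op_fetch forms below return the new value rather than the old one.
// The CHECK lines verify that the same __atomic_fetch_<op>_4 libcall is
// emitted and that the operation is then reapplied to the returned old value.
// Since this runs at -O1, that follow-up operation can appear in canonicalized
// form (e.g. the sub becomes an add of a negative constant; see the nand
// FIXME at the end of the file).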

int test_atomic_add_fetch(int *p) {
  // CHECK: test_atomic_add_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_sub_fetch(int *p) {
  // CHECK: test_atomic_sub_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_and_fetch(int *p) {
  // CHECK: test_atomic_and_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_or_fetch(int *p) {
  // CHECK: test_atomic_or_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_xor_fetch(int *p) {
  // CHECK: test_atomic_xor_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_nand_fetch(int *p) {
  // CHECK: test_atomic_nand_fetch
  // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // FIXME: We should not be checking optimized IR. It changes independently of clang.
  // FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55
  // FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1
  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}
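
// A plain C sketch, not exercised by the RUN line above, of the relationship
// the *_op_fetch tests rely on: __atomic_fetch_add returns the old value and
// __atomic_add_fetch returns the new one, so the latter equals the former plus
// the operand. The function name is illustrative only.
int add_fetch_via_fetch_add(int *p, int v) {
  int old = __atomic_fetch_add(p, v, memory_order_seq_cst); // old value
  return old + v; // the new value, i.e. what __atomic_add_fetch would return
}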