; RUN: llc < %s -mtriple=i686-- -stackrealign -O2 | FileCheck %s
; PR28755

; Check that the register allocator is able to handle this case with a lot
; of fixed and reserved registers. We do that by emitting an LEA before
; generating the four fixed cmpxchg8b operands.
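;
; cmpxchg8b implicitly pins four registers (EDX:EAX for the expected value,
; ECX:EBX for the replacement), so few GPRs are left to form the memory
; operand; with -stackrealign and the VLA below, the frame pointer (and a
; base pointer) is reserved as well, which is what makes this case tight.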

define void @foo_alloca(i64* %a, i32 %off, i32 %n) {
  %dummy = alloca i32, i32 %n
  %addr = getelementptr inbounds i64, i64* %a, i32 %off

  %res = cmpxchg i64* %addr, i64 0, i64 1 monotonic monotonic
  ret void
}

; CHECK-LABEL: foo_alloca
; CHECK: leal    {{\(%e..,%e..,.*\)}}, [[REGISTER:%e.i]]
; CHECK-NEXT: xorl    %eax, %eax
; CHECK-NEXT: xorl    %edx, %edx
; CHECK-NEXT: xorl    %ecx, %ecx
; CHECK-NEXT: movl    $1, %ebx
; CHECK-NEXT: lock            cmpxchg8b       ([[REGISTER]])

; If the address mode does not use an index register,
; check that we did not generate the LEA.
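; (Here the incoming pointer is already in a single register, so the
; cmpxchg8b memory operand can use it directly.)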
define void @foo_alloca_direct_address(i64* %addr, i32 %n) {
  %dummy = alloca i32, i32 %n

  %res = cmpxchg i64* %addr, i64 0, i64 1 monotonic monotonic
  ret void
}

; CHECK-LABEL: foo_alloca_direct_address
; CHECK-NOT: leal    {{\(%e.*\)}}, [[REGISTER:%e.i]]
; CHECK: lock            cmpxchg8b       ([[REGISTER]])

; We used to have a bug when combining:
; - base pointer for stack frame (VLA + alignment)
; - cmpxchg8b frameindex + index reg
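;
; With a realigned frame that also contains a VLA, separate frame and base
; pointers are needed to reach fixed stack objects (on i686 the base pointer
; is normally ESI). Since cmpxchg8b already pins EAX/EBX/ECX/EDX, a
; frameindex + index-register address leaves too few registers for the
; allocator, so the address is materialized with an LEA first, as the CHECK
; lines below expect.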

declare void @escape(i32*)

define void @foo_alloca_index(i32 %i, i64 %val) {
entry:
  %Counters = alloca [19 x i64], align 32
  %vla = alloca i32, i32 %i
  call void @escape(i32* %vla)
  br label %body

body:
  %p = getelementptr inbounds [19 x i64], [19 x i64]* %Counters, i32 0, i32 %i
  %t2 = cmpxchg volatile i64* %p, i64 %val, i64 %val seq_cst seq_cst
  %t3 = extractvalue { i64, i1 } %t2, 0
  %cmp.i = icmp eq i64 %val, %t3
  br i1 %cmp.i, label %done, label %body

done:
  ret void
}

; Check that we add a LEA
; CHECK-LABEL: foo_alloca_index:
; CHECK: leal    {{[0-9]*\(%e..,%e..,8\), %e..}}
; CHECK: lock            cmpxchg8b       ({{%e..}})



; We used to have a bug when combining:
; - base pointer for stack frame (VLA + alignment)
; - cmpxchg8b global + index reg
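;
; Same situation, but the address is a global plus a scaled index rather
; than a frameindex; it still has to be folded into a single register with
; an LEA before the cmpxchg8b, as checked below.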

@Counters = external global [19 x i64]

define void @foo_alloca_index_global(i32 %i, i64 %val) {
entry:
  %aligner = alloca i32, align 32
  call void @escape(i32* %aligner)
  %vla = alloca i32, i32 %i
  call void @escape(i32* %vla)
  br label %body

body:
  %p = getelementptr inbounds [19 x i64], [19 x i64]* @Counters, i32 0, i32 %i
  %t2 = cmpxchg volatile i64* %p, i64 %val, i64 %val seq_cst seq_cst
  %t3 = extractvalue { i64, i1 } %t2, 0
  %cmp.i = icmp eq i64 %val, %t3
  br i1 %cmp.i, label %done, label %body

done:
  ret void
}

; Check that we add a LEA
; CHECK-LABEL: foo_alloca_index_global:
; CHECK: leal    {{Counters\(,%e..,8\), %e..}}
; CHECK: lock            cmpxchg8b       ({{%e..}})