; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -O2 -tail-dup-size=1000 -tail-dup-placement-threshold=1000 -enable-tail-merge=0 < %s | FileCheck -enable-var-scope -check-prefix=GCN %s

; Need to trigger tail duplication during MachineBlockPlacement,
; since calls aren't tail duplicated pre-RA. A contrasting
; non-convergent sketch follows the declarations below.

declare void @nonconvergent_func() #0
declare void @convergent_func() #1
declare void @llvm.amdgcn.s.barrier() #1
declare void @llvm.amdgcn.ds.gws.init(i32, i32) #2
declare void @llvm.amdgcn.ds.gws.barrier(i32, i32) #2
declare void @llvm.amdgcn.ds.gws.sema.release.all(i32 %offset) #2
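
; Illustrative sketch, not part of the original test: the %tail block
; below contains no convergent operation, so under the RUN line's large
; tail-dup thresholds MachineBlockPlacement is free to duplicate it into
; both predecessors. The function name is hypothetical and no codegen
; output is asserted beyond the label.
; GCN-LABEL: {{^}}taildup_nonconvergent_store:
define void @taildup_nonconvergent_store(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #0 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %tail

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %tail

tail:
  ; No barrier, GWS op, or call here, so duplicating this block is legal.
  store i32 2, i32 addrspace(1)* %b
  ret void
}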

; The convergent s_barrier call shouldn't be tail duplicated.

; GCN-LABEL: {{^}}taildup_barrier:
; GCN: s_barrier
; GCN-NOT: s_barrier
define void @taildup_barrier(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #0 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  call void @llvm.amdgcn.s.barrier()
  br label %ret

ret:
  ret void
}

; GCN-LABEL: {{^}}taildup_convergent_call:
; GCN: s_swappc_b64
; GCN-NOT: s_swappc_b64
define void @taildup_convergent_call(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #1 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  call void @convergent_func()
  br label %ret

ret:
  ret void
}

; TODO: Currently there is only one convergent call pseudo, but this
; could in principle use a nonconvergent variant.
; GCN-LABEL: {{^}}taildup_nonconvergent_call:
; GCN: s_swappc_b64
; GCN-NOT: s_swappc_b64
define void @taildup_nonconvergent_call(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #1 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  call void @nonconvergent_func()
  br label %ret

ret:
  ret void
}

; GCN-LABEL: {{^}}taildup_convergent_tailcall:
; GCN: s_setpc_b64
; GCN-NOT: s_setpc_b64
define void @taildup_convergent_tailcall(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond) #1 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  tail call void @convergent_func()
  ret void
}

; GCN-LABEL: {{^}}taildup_gws_init:
; GCN: ds_gws_init
; GCN-NOT: ds_gws_init
define amdgpu_kernel void @taildup_gws_init(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond, i32 %val, i32 %offset) #0 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  call void @llvm.amdgcn.ds.gws.init(i32 %val, i32 %offset)
  br label %ret

ret:
  ret void
}

; GCN-LABEL: {{^}}taildup_gws_barrier:
; GCN: ds_gws_barrier
; GCN-NOT: ds_gws_barrier
define amdgpu_kernel void @taildup_gws_barrier(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond, i32 %val, i32 %offset) #0 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  call void @llvm.amdgcn.ds.gws.barrier(i32 %val, i32 %offset)
  br label %ret

ret:
  ret void
}

; GCN-LABEL: {{^}}taildup_gws_sema_release_all:
; GCN: ds_gws_sema_release_all
; GCN-NOT: ds_gws
define amdgpu_kernel void @taildup_gws_sema_release_all(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i1 %cond, i32 %offset) #0 {
entry:
  br i1 %cond, label %bb1, label %bb2

bb1:
  store i32 0, i32 addrspace(1)* %a
  br label %call

bb2:
  store i32 1, i32 addrspace(1)* %a
  br label %call

call:
  call void @llvm.amdgcn.ds.gws.sema.release.all(i32 %offset)
  br label %ret

ret:
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind convergent }
attributes #2 = { convergent inaccessiblememonly nounwind }