; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
; Ports of most of test/CodeGen/NVPTX/access-non-generic.ll
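; InferAddressSpaces rewrites flat (generic) pointer operations to use the
; original address space when the pointer can be traced back through
; addrspacecasts, eliminating the casts where possible.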

@scalar = internal addrspace(3) global float 0.0, align 4
@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4

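; Accesses through an addrspacecast of an LDS global, whether the cast is a
; constant expression or an instruction, are rewritten to use the
; addrspace(3) pointer directly.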
; CHECK-LABEL: @load_store_lds_f32(
; CHECK: %tmp = load float, float addrspace(3)* @scalar, align 4
; CHECK: call void @use(float %tmp)
; CHECK: store float %v, float addrspace(3)* @scalar, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp2 = load float, float addrspace(3)* @scalar, align 4
; CHECK: call void @use(float %tmp2)
; CHECK: store float %v, float addrspace(3)* @scalar, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp3 = load float, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
; CHECK: call void @use(float %tmp3)
; CHECK: store float %v, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp4 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5
; CHECK: %tmp5 = load float, float addrspace(3)* %tmp4, align 4
; CHECK: call void @use(float %tmp5)
; CHECK: store float %v, float addrspace(3)* %tmp4, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: %tmp7 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 %i
; CHECK: %tmp8 = load float, float addrspace(3)* %tmp7, align 4
; CHECK: call void @use(float %tmp8)
; CHECK: store float %v, float addrspace(3)* %tmp7, align 4
; CHECK: call void @llvm.amdgcn.s.barrier()
; CHECK: ret void
define amdgpu_kernel void @load_store_lds_f32(i32 %i, float %v) #0 {
bb:
  %tmp = load float, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
  call void @use(float %tmp)
  store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp1 = addrspacecast float addrspace(3)* @scalar to float*
  %tmp2 = load float, float* %tmp1, align 4
  call void @use(float %tmp2)
  store float %v, float* %tmp1, align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp3 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
  call void @use(float %tmp3)
  store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp4 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
  %tmp5 = load float, float* %tmp4, align 4
  call void @use(float %tmp5)
  store float %v, float* %tmp4, align 4
  call void @llvm.amdgcn.s.barrier()
  %tmp6 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
  %tmp7 = getelementptr inbounds [10 x float], [10 x float]* %tmp6, i32 0, i32 %i
  %tmp8 = load float, float* %tmp7, align 4
  call void @use(float %tmp8)
  store float %v, float* %tmp7, align 4
  call void @llvm.amdgcn.s.barrier()
  ret void
}

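; The constant-expression cast chain is folded so the load reads directly
; from LDS through an addrspace(3) bitcast.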
; CHECK-LABEL: @constexpr_load_int_from_float_lds(
; CHECK: %tmp = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*), align 4
define i32 @constexpr_load_int_from_float_lds() #0 {
bb:
  %tmp = load i32, i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*) to i32*), align 4
  ret i32 %tmp
}

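; The addrspace(1) address space propagates from the casted argument through
; the chain of getelementptrs and the bitcast.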
; CHECK-LABEL: @load_int_from_global_float(
; CHECK: %tmp1 = getelementptr float, float addrspace(1)* %input, i32 %i
; CHECK: %tmp2 = getelementptr float, float addrspace(1)* %tmp1, i32 %j
; CHECK: %tmp3 = bitcast float addrspace(1)* %tmp2 to i32 addrspace(1)*
; CHECK: %tmp4 = load i32, i32 addrspace(1)* %tmp3
; CHECK: ret i32 %tmp4
define i32 @load_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) #0 {
bb:
  %tmp = addrspacecast float addrspace(1)* %input to float*
  %tmp1 = getelementptr float, float* %tmp, i32 %i
  %tmp2 = getelementptr float, float* %tmp1, i32 %j
  %tmp3 = bitcast float* %tmp2 to i32*
  %tmp4 = load i32, i32* %tmp3
  ret i32 %tmp4
}

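; The addrspacecast is eliminated even when it is nested inside other
; constant expressions.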
; CHECK-LABEL: @nested_const_expr(
; CHECK: store i32 1, i32 addrspace(3)* bitcast (float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i64 0, i64 1) to i32 addrspace(3)*), align 4
define amdgpu_kernel void @nested_const_expr() #0 {
  store i32 1, i32* bitcast (float* getelementptr ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i64 0, i64 1) to i32*), align 4
  ret void
}

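; Once all uses are rewritten, the addrspacecast of the kernel argument is
; removed entirely.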
; CHECK-LABEL: @rauw(
; CHECK: %addr = getelementptr float, float addrspace(1)* %input, i64 10
; CHECK-NEXT: %v = load float, float addrspace(1)* %addr
; CHECK-NEXT: store float %v, float addrspace(1)* %addr
; CHECK-NEXT: ret void
define amdgpu_kernel void @rauw(float addrspace(1)* %input) #0 {
bb:
  %generic_input = addrspacecast float addrspace(1)* %input to float*
  %addr = getelementptr float, float* %generic_input, i64 10
  %v = load float, float* %addr
  store float %v, float* %addr
  ret void
}

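; A pointer induction variable whose incoming values all derive from the LDS
; array is rewritten to addrspace(3), including the exit comparison.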
; CHECK-LABEL: @loop(

; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
; CHECK: %end = getelementptr float, float addrspace(3)* %p, i64 10
; CHECK: br label %loop

; CHECK: loop:                                             ; preds = %loop, %entry
; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
; CHECK: %v = load float, float addrspace(3)* %i
; CHECK: call void @use(float %v)
; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
; CHECK: %exit_cond = icmp eq float addrspace(3)* %i2, %end

; CHECK: br i1 %exit_cond, label %exit, label %loop
define amdgpu_kernel void @loop() #0 {
entry:
  %p = addrspacecast [10 x float] addrspace(3)* @array to float*
  %end = getelementptr float, float* %p, i64 10
  br label %loop

loop:                                             ; preds = %loop, %entry
  %i = phi float* [ %p, %entry ], [ %i2, %loop ]
  %v = load float, float* %i
  call void @use(float %v)
  %i2 = getelementptr float, float* %i, i64 1
  %exit_cond = icmp eq float* %i2, %end
  br i1 %exit_cond, label %exit, label %loop

exit:                                             ; preds = %loop
  ret void
}

@generic_end = external addrspace(1) global float*

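; The loop bound here is a generic pointer loaded from memory, so the
; induction variable must be cast back to the generic address space for the
; exit comparison.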
; FIXME: Should be able to eliminate the cast inside the loop
; CHECK-LABEL: @loop_with_generic_bound(
; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
; CHECK: %end = load float*, float* addrspace(1)* @generic_end
; CHECK: br label %loop

; CHECK: loop:
; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
; CHECK: %v = load float, float addrspace(3)* %i
; CHECK: call void @use(float %v)
; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
; CHECK: %0 = addrspacecast float addrspace(3)* %i2 to float*
; CHECK: %exit_cond = icmp eq float* %0, %end
; CHECK: br i1 %exit_cond, label %exit, label %loop
define amdgpu_kernel void @loop_with_generic_bound() #0 {
entry:
  %p = addrspacecast [10 x float] addrspace(3)* @array to float*
  %end = load float*, float* addrspace(1)* @generic_end
  br label %loop

loop:                                             ; preds = %loop, %entry
  %i = phi float* [ %p, %entry ], [ %i2, %loop ]
  %v = load float, float* %i
  call void @use(float %v)
  %i2 = getelementptr float, float* %i, i64 1
  %exit_cond = icmp eq float* %i2, %end
  br i1 %exit_cond, label %exit, label %loop

exit:                                             ; preds = %loop
  ret void
}

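; The select constant expression used as a GEP index must be handled
; gracefully; the pointers here are generic undef values, so nothing is
; inferred.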
; CHECK-LABEL: @select_bug(
; CHECK: %add.ptr157 = getelementptr inbounds i64, i64* undef, i64 select (i1 icmp ne (i32* inttoptr (i64 4873 to i32*), i32* null), i64 73, i64 93)
; CHECK: %cmp169 = icmp uge i64* undef, %add.ptr157
define void @select_bug() #0 {
  %add.ptr157 = getelementptr inbounds i64, i64* undef, i64 select (i1 icmp ne (i32* inttoptr (i64 4873 to i32*), i32* null), i64 73, i64 93)
  %cmp169 = icmp uge i64* undef, %add.ptr157
  unreachable
}

declare void @llvm.amdgcn.s.barrier() #1
declare void @use(float) #0

attributes #0 = { nounwind }
attributes #1 = { convergent nounwind }