; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -slp-vectorizer -slp-threshold=-18 < %s | FileCheck %s

; Make sure there's no SCEV assert when the indexes are for
; different-sized address spaces

target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"

define void @slp_scev_assert(i32 %idx, i64 %tmp3) #0 {
; CHECK-LABEL: @slp_scev_assert(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[TMP:%.*]] = addrspacecast i8 addrspace(5)* undef to i8*
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(5)* undef, i32 [[IDX:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP]], i64 [[TMP3:%.*]]
; CHECK-NEXT:    store i8 0, i8 addrspace(5)* [[TMP2]]
; CHECK-NEXT:    store i8 0, i8* [[TMP4]]
; CHECK-NEXT:    ret void
;
bb:
  %tmp = addrspacecast i8 addrspace(5)* undef to i8*
  %tmp2 = getelementptr inbounds i8, i8 addrspace(5)* undef, i32 %idx
  %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 %tmp3
  store i8 0, i8 addrspace(5)* %tmp2
  store i8 0, i8* %tmp4
  ret void
}

define void @multi_as_reduction_different_sized(i32 addrspace(3)* %lds, i32 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_different_sized(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[FLAT:%.*]] = addrspacecast i32 addrspace(3)* [[LDS:%.*]] to i32*
; CHECK-NEXT:    [[ADD0:%.*]] = add i32 [[IDX0:%.*]], 2
; CHECK-NEXT:    [[ADD1:%.*]] = add i64 [[IDX1:%.*]], 1
; CHECK-NEXT:    [[LDS_1:%.*]] = getelementptr inbounds i32, i32 addrspace(3)* [[LDS]], i32 [[ADD0]]
; CHECK-NEXT:    [[FLAT_1:%.*]] = getelementptr inbounds i32, i32* [[FLAT]], i64 [[ADD1]]
; CHECK-NEXT:    [[LOAD_LDS_0:%.*]] = load i32, i32 addrspace(3)* [[LDS]], align 4
; CHECK-NEXT:    [[LOAD_LDS_1:%.*]] = load i32, i32 addrspace(3)* [[LDS_1]], align 4
; CHECK-NEXT:    [[LOAD_FLAT_0:%.*]] = load i32, i32* [[FLAT]], align 4
; CHECK-NEXT:    [[LOAD_FLAT_1:%.*]] = load i32, i32* [[FLAT_1]], align 4
; CHECK-NEXT:    [[SUB0:%.*]] = sub i32 [[LOAD_FLAT_0]], [[LOAD_LDS_0]]
; CHECK-NEXT:    [[SUB1:%.*]] = sub i32 [[LOAD_FLAT_1]], [[LOAD_LDS_1]]
; CHECK-NEXT:    store i32 [[SUB0]], i32* undef
; CHECK-NEXT:    store i32 [[SUB1]], i32* undef
; CHECK-NEXT:    ret void
;
bb:
  %flat = addrspacecast i32 addrspace(3)* %lds to i32*
  %add0 = add i32 %idx0, 2
  %add1 = add i64 %idx1, 1

  %lds.1 = getelementptr inbounds i32, i32 addrspace(3)* %lds, i32 %add0
  %flat.1 = getelementptr inbounds i32, i32* %flat, i64 %add1

  %load.lds.0 = load i32, i32 addrspace(3)* %lds, align 4
  %load.lds.1 = load i32, i32 addrspace(3)* %lds.1, align 4

  %load.flat.0 = load i32, i32* %flat, align 4
  %load.flat.1 = load i32, i32* %flat.1, align 4

  %sub0 = sub i32 %load.flat.0, %load.lds.0
  %sub1 = sub i32 %load.flat.1, %load.lds.1

  store i32 %sub0, i32* undef
  store i32 %sub1, i32* undef
  ret void
}

; This should vectorize if using GetUnderlyingObject
define void @multi_as_reduction_same_size(i32 addrspace(1)* %global, i64 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_same_size(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[FLAT:%.*]] = addrspacecast i32 addrspace(1)* [[GLOBAL:%.*]] to i32*
; CHECK-NEXT:    [[ADD0:%.*]] = add i64 [[IDX0:%.*]], 2
; CHECK-NEXT:    [[ADD1:%.*]] = add i64 [[IDX1:%.*]], 1
; CHECK-NEXT:    [[GLOBAL_1:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[GLOBAL]], i64 [[ADD0]]
; CHECK-NEXT:    [[FLAT_1:%.*]] = getelementptr inbounds i32, i32* [[FLAT]], i64 [[ADD1]]
; CHECK-NEXT:    [[LOAD_GLOBAL_0:%.*]] = load i32, i32 addrspace(1)* [[GLOBAL]], align 4
; CHECK-NEXT:    [[LOAD_GLOBAL_1:%.*]] = load i32, i32 addrspace(1)* [[GLOBAL_1]], align 4
; CHECK-NEXT:    [[LOAD_FLAT_0:%.*]] = load i32, i32* [[FLAT]], align 4
; CHECK-NEXT:    [[LOAD_FLAT_1:%.*]] = load i32, i32* [[FLAT_1]], align 4
; CHECK-NEXT:    [[SUB0:%.*]] = sub i32 [[LOAD_FLAT_0]], [[LOAD_GLOBAL_0]]
; CHECK-NEXT:    [[SUB1:%.*]] = sub i32 [[LOAD_FLAT_1]], [[LOAD_GLOBAL_1]]
; CHECK-NEXT:    store i32 [[SUB0]], i32* undef
; CHECK-NEXT:    store i32 [[SUB1]], i32* undef
; CHECK-NEXT:    ret void
;
bb:
  %flat = addrspacecast i32 addrspace(1)* %global to i32*
  %add0 = add i64 %idx0, 2
  %add1 = add i64 %idx1, 1

  %global.1 = getelementptr inbounds i32, i32 addrspace(1)* %global, i64 %add0
  %flat.1 = getelementptr inbounds i32, i32* %flat, i64 %add1

  %load.global.0 = load i32, i32 addrspace(1)* %global, align 4
  %load.global.1 = load i32, i32 addrspace(1)* %global.1, align 4

  %load.flat.0 = load i32, i32* %flat, align 4
  %load.flat.1 = load i32, i32* %flat.1, align 4

  %sub0 = sub i32 %load.flat.0, %load.global.0
  %sub1 = sub i32 %load.flat.1, %load.global.1

  store i32 %sub0, i32* undef
  store i32 %sub1, i32* undef
  ret void
}

; This should vectorize if using GetUnderlyingObject
; The adds are done in the same width (i64), even though the addrspace(3)
; pointer size is smaller
define void @multi_as_reduction_different_sized_noncanon(i32 addrspace(3)* %lds, i64 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_different_sized_noncanon(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[FLAT:%.*]] = addrspacecast i32 addrspace(3)* [[LDS:%.*]] to i32*
; CHECK-NEXT:    [[ADD0:%.*]] = add i64 [[IDX0:%.*]], 2
; CHECK-NEXT:    [[ADD1:%.*]] = add i64 [[IDX1:%.*]], 1
; CHECK-NEXT:    [[LDS_1:%.*]] = getelementptr inbounds i32, i32 addrspace(3)* [[LDS]], i64 [[ADD0]]
; CHECK-NEXT:    [[FLAT_1:%.*]] = getelementptr inbounds i32, i32* [[FLAT]], i64 [[ADD1]]
; CHECK-NEXT:    [[LOAD_LDS_0:%.*]] = load i32, i32 addrspace(3)* [[LDS]], align 4
; CHECK-NEXT:    [[LOAD_LDS_1:%.*]] = load i32, i32 addrspace(3)* [[LDS_1]], align 4
; CHECK-NEXT:    [[LOAD_FLAT_0:%.*]] = load i32, i32* [[FLAT]], align 4
; CHECK-NEXT:    [[LOAD_FLAT_1:%.*]] = load i32, i32* [[FLAT_1]], align 4
; CHECK-NEXT:    [[SUB0:%.*]] = sub i32 [[LOAD_FLAT_0]], [[LOAD_LDS_0]]
; CHECK-NEXT:    [[SUB1:%.*]] = sub i32 [[LOAD_FLAT_1]], [[LOAD_LDS_1]]
; CHECK-NEXT:    store i32 [[SUB0]], i32* undef
; CHECK-NEXT:    store i32 [[SUB1]], i32* undef
; CHECK-NEXT:    ret void
;
bb:
  %flat = addrspacecast i32 addrspace(3)* %lds to i32*
  %add0 = add i64 %idx0, 2
  %add1 = add i64 %idx1, 1

  %lds.1 = getelementptr inbounds i32, i32 addrspace(3)* %lds, i64 %add0
  %flat.1 = getelementptr inbounds i32, i32* %flat, i64 %add1

  %load.lds.0 = load i32, i32 addrspace(3)* %lds, align 4
  %load.lds.1 = load i32, i32 addrspace(3)* %lds.1, align 4

  %load.flat.0 = load i32, i32* %flat, align 4
  %load.flat.1 = load i32, i32* %flat.1, align 4

  %sub0 = sub i32 %load.flat.0, %load.lds.0
  %sub1 = sub i32 %load.flat.1, %load.lds.1

  store i32 %sub0, i32* undef
  store i32 %sub1, i32* undef
  ret void
}

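; Make sure there's no SLP vectorizer crash when the store pointers are
; produced by addrspacecast instructions.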
; CHECK-LABEL: slp_crash_on_addrspacecast
; CHECK: ret void
define void @slp_crash_on_addrspacecast() {
entry:
  %0 = getelementptr inbounds i64, i64 addrspace(3)* undef, i32 undef
  %p0 = addrspacecast i64 addrspace(3)* %0 to i64*
  store i64 undef, i64* %p0, align 8
  %1 = getelementptr inbounds i64, i64 addrspace(3)* undef, i32 undef
  %p1 = addrspacecast i64 addrspace(3)* %1 to i64*
  store i64 undef, i64* %p1, align 8
  ret void
}