; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mcpu=krait | FileCheck %s

define void @func1(i16* %a, i16* %b, i16* %c) {
; CHECK-LABEL: func1:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    add r3, r1, #16
; CHECK-NEXT:    vldr d18, [r2, #16]
; CHECK-NEXT:    vld1.16 {d16}, [r3:64]
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    vaddw.s16 q10, q8, d18
; CHECK-NEXT:    vmovn.i32 d19, q10
; CHECK-NEXT:    vldr d20, [r0, #16]
; CHECK-NEXT:    vstr d19, [r0, #16]
; CHECK-NEXT:    vldr d19, [r2, #16]
; CHECK-NEXT:    vmull.s16 q11, d18, d19
; CHECK-NEXT:    vmovl.s16 q9, d19
; CHECK-NEXT:    vmla.i32 q11, q8, q9
; CHECK-NEXT:    vmovn.i32 d16, q11
; CHECK-NEXT:    vstr d16, [r1, #16]
; CHECK-NEXT:    vldr d16, [r2, #16]
; CHECK-NEXT:    vmlal.s16 q11, d16, d20
; CHECK-NEXT:    vmovn.i32 d16, q11
; CHECK-NEXT:    vstr d16, [r0, #16]
; CHECK-NEXT:    bx lr
entry:
; This test case tries to vectorize the pseudo code below:
;   a[i] = b[i] + c[i];
;   b[i] = a[i] * c[i];
;   a[i] = b[i] + a[i] * c[i];
; Check that the vector load of a[i] for "a[i] = b[i] + a[i] * c[i]" is
; scheduled before the first vector store to a[i] ("a[i] = b[i] + c[i]").
; Check that no vector load of a[i] is scheduled between the vector
; stores to a[i]; otherwise the load of a[i] would be polluted by the first
; vector store to a[i].
; This test case checks that the chain information is updated during
; lowerMUL for the newly created load SDNode.


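; For reference, a scalar C sketch of what the IR below computes (a
; hypothetical illustration, not part of the original test; the function
; signature and loop bounds are assumptions based on the <4 x i16> accesses
; at element offset 8, and a_orig reflects that the IR loads a[i] before the
; first store to a[i]):
;
;   void func1(int16_t *a, int16_t *b, int16_t *c) {
;     for (int i = 8; i < 12; i++) {
;       int16_t a_orig = a[i];         /* loaded before a[i] is overwritten */
;       a[i] = b[i] + c[i];            /* first store to a[i] */
;       b[i] = a[i] * c[i];
;       a[i] = b[i] + a_orig * c[i];   /* must not see the updated a[i] */
;     }
;   }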
  %scevgep0 = getelementptr i16, i16* %a, i32 8
  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
  %scevgep1 = getelementptr i16, i16* %b, i32 8
  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
  %0 = zext <4 x i16> %vec1 to <4 x i32>
  %scevgep2 = getelementptr i16, i16* %c, i32 8
  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
  %1 = sext <4 x i16> %vec2 to <4 x i32>
  %vec3 = add <4 x i32> %1, %0
  %2 = trunc <4 x i32> %vec3 to <4 x i16>
  %scevgep3 = getelementptr i16, i16* %a, i32 8
  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
  store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
  %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
  %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
  %3 = sext <4 x i16> %vec4 to <4 x i32>
  %vec5 = mul <4 x i32> %3, %vec3
  %4 = trunc <4 x i32> %vec5 to <4 x i16>
  %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
  store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
  %5 = sext <4 x i16> %vec0 to <4 x i32>
  %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
  %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
  %6 = sext <4 x i16> %vec6 to <4 x i32>
  %vec7 = mul <4 x i32> %6, %5
  %vec8 = add <4 x i32> %vec7, %vec5
  %7 = trunc <4 x i32> %vec8 to <4 x i16>
  %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
  store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
  ret void
}

define void @func2(i16* %a, i16* %b, i16* %c) {
; CHECK-LABEL: func2:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    add r3, r1, #16
; CHECK-NEXT:    vldr d18, [r2, #16]
; CHECK-NEXT:    vld1.16 {d16}, [r3:64]
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    vaddw.s16 q10, q8, d18
; CHECK-NEXT:    vmovn.i32 d19, q10
; CHECK-NEXT:    vldr d20, [r0, #16]
; CHECK-NEXT:    vstr d19, [r0, #16]
; CHECK-NEXT:    vldr d19, [r2, #16]
; CHECK-NEXT:    vmull.s16 q11, d18, d19
; CHECK-NEXT:    vmovl.s16 q9, d19
; CHECK-NEXT:    vmla.i32 q11, q8, q9
; CHECK-NEXT:    vmovn.i32 d16, q11
; CHECK-NEXT:    vstr d16, [r1, #16]
; CHECK-NEXT:    vldr d16, [r2, #16]
; CHECK-NEXT:    vmlal.s16 q11, d16, d20
; CHECK-NEXT:    vaddw.s16 q8, q11, d20
; CHECK-NEXT:    vmovn.i32 d16, q8
; CHECK-NEXT:    vstr d16, [r0, #16]
; CHECK-NEXT:    bx lr
entry:
; This test case tries to vectorize the pseudo code below:
;   a[i] = b[i] + c[i];
;   b[i] = a[i] * c[i];
;   a[i] = b[i] + a[i] * c[i] + a[i];
; Check that the vector load of a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
; is scheduled before the first vector store to a[i] ("a[i] = b[i] + c[i]").
; Check that no vector load of a[i] is scheduled between the first vector
; store to a[i] and the vector add of a[i]; otherwise the load of a[i]
; would be polluted by the first vector store to a[i].
; This test case checks that both the chain and the value of the newly
; created load SDNode are updated during lowerMUL.


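; As above, a scalar C sketch of what the IR below computes (a hypothetical
; illustration; the signature and loop bounds are assumptions, and a_orig
; denotes the value of a[i] loaded before the first store, as in the IR):
;
;   void func2(int16_t *a, int16_t *b, int16_t *c) {
;     for (int i = 8; i < 12; i++) {
;       int16_t a_orig = a[i];                  /* loaded up front */
;       a[i] = b[i] + c[i];                     /* first store to a[i] */
;       b[i] = a[i] * c[i];
;       a[i] = b[i] + a_orig * c[i] + a_orig;   /* extra add of original a[i] */
;     }
;   }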
  %scevgep0 = getelementptr i16, i16* %a, i32 8
  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
  %scevgep1 = getelementptr i16, i16* %b, i32 8
  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
  %0 = zext <4 x i16> %vec1 to <4 x i32>
  %scevgep2 = getelementptr i16, i16* %c, i32 8
  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
  %1 = sext <4 x i16> %vec2 to <4 x i32>
  %vec3 = add <4 x i32> %1, %0
  %2 = trunc <4 x i32> %vec3 to <4 x i16>
  %scevgep3 = getelementptr i16, i16* %a, i32 8
  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
  store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
  %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
  %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
  %3 = sext <4 x i16> %vec4 to <4 x i32>
  %vec5 = mul <4 x i32> %3, %vec3
  %4 = trunc <4 x i32> %vec5 to <4 x i16>
  %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
  store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
  %5 = sext <4 x i16> %vec0 to <4 x i32>
  %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
  %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
  %6 = sext <4 x i16> %vec6 to <4 x i32>
  %vec7 = mul <4 x i32> %6, %5
  %vec8 = add <4 x i32> %vec7, %vec5
  %vec9 = add <4 x i32> %vec8, %5
  %7 = trunc <4 x i32> %vec9 to <4 x i16>
  %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
  store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
  ret void
}