; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=mips64el-linux-gnu -mcpu=mips64r6 -mattr=+msa,+fp64 < %s | FileCheck %s --check-prefixes=MIPSEL64R6
; RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64 < %s | FileCheck %s --check-prefixes=MIPSEL32R5

declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32)
declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32)

declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32)
declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32)

; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) when c1 > c2
; MASK_TYPE1 = (c1-c2) 0s | 1s | ends with c2 0s
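; Worked example (illustrative only, not part of the autogenerated checks): with
; c1 = 52 and c2 = 51 on each i64 lane, the avoided combine would compute
;   (x >> 52) << 51  ==  (x >> 1) & 0x7FF8000000000000   (1 zero | 12 ones | 51 zeros)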
define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64(<2 x i64>* %a, <2 x i64>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.d $w0, $w0, 52
; MIPSEL64R6-NEXT:    slli.d $w0, $w0, 51
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.d $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.d $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.d $w0, $w0, 52
; MIPSEL32R5-NEXT:    slli.d $w0, $w0, 51
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
entry:
  %0 = load <2 x i64>, <2 x i64>* %a
  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 52)
  %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 51)
  store <2 x i64> %2, <2 x i64>* %b
  ret void
}

; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) when c1 > c2
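; Worked example (illustrative only): with c1 = 6 and c2 = 4 on each i64 lane,
;   (x >> 6) << 4  ==  (x >> 2) & 0x3FFFFFFFFFFFFFF0   (2 zeros | 58 ones | 4 zeros)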
define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long(<2 x i64>* %a, <2 x i64>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.d $w0, $w0, 6
; MIPSEL64R6-NEXT:    slli.d $w0, $w0, 4
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.d $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.d $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.d $w0, $w0, 6
; MIPSEL32R5-NEXT:    slli.d $w0, $w0, 4
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
entry:
  %0 = load <2 x i64>, <2 x i64>* %a
  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 6)
  %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 4)
  store <2 x i64> %2, <2 x i64>* %b
  ret void
}

; do not fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) when c1 <= c2
; MASK_TYPE2 = 1s | ends with c2 0s
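; Worked example (illustrative only): with c1 = 4 and c2 = 6 on each i64 lane,
;   (x >> 4) << 6  ==  (x << 2) & 0xFFFFFFFFFFFFFFC0   (58 ones | 6 zeros)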
define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32(<2 x i64>* %a, <2 x i64>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.d $w0, $w0, 4
; MIPSEL64R6-NEXT:    slli.d $w0, $w0, 6
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.d $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.d $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.d $w0, $w0, 4
; MIPSEL32R5-NEXT:    slli.d $w0, $w0, 6
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
entry:
  %0 = load <2 x i64>, <2 x i64>* %a
  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 4)
  %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 6)
  store <2 x i64> %2, <2 x i64>* %b
  ret void
}

; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) when c1 > c2
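; Worked example (illustrative only): with c1 = 7 and c2 = 3 on each i32 lane,
;   (x >> 7) << 3  ==  (x >> 4) & 0x0FFFFFF8   (4 zeros | 25 ones | 3 zeros)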
define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long(<4 x i32>* %a, <4 x i32>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.w $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.w $w0, $w0, 7
; MIPSEL64R6-NEXT:    slli.w $w0, $w0, 3
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.w $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.w $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.w $w0, $w0, 7
; MIPSEL32R5-NEXT:    slli.w $w0, $w0, 3
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.w $w0, 0($5)
entry:
  %0 = load <4 x i32>, <4 x i32>* %a
  %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
  %2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 3)
  store <4 x i32> %2, <4 x i32>* %b
  ret void
}

; do not fold (shl (srl x, c1), c1) -> (and x, (shl -1, c1))
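; Worked example (illustrative only): with c1 = 38 on each i64 lane,
;   (x >> 38) << 38  ==  x & 0xFFFFFFC000000000   (i.e. x & ((i64 -1) << 38))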
define void @avoid_to_combine_shifts_to_and_mask_type2_i64_long(<2 x i64>* %a, <2 x i64>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64_long:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.d $w0, $w0, 38
; MIPSEL64R6-NEXT:    slli.d $w0, $w0, 38
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.d $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64_long:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.d $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.d $w0, $w0, 38
; MIPSEL32R5-NEXT:    slli.d $w0, $w0, 38
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
entry:
  %0 = load <2 x i64>, <2 x i64>* %a
  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 38)
  %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 38)
  store <2 x i64> %2, <2 x i64>* %b
  ret void
}

; do not fold (shl (srl x, c1), c1) -> (and x, (shl -1, c1))
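; Worked example (illustrative only): with c1 = 3 on each i64 lane,
;   (x >> 3) << 3  ==  x & 0xFFFFFFFFFFFFFFF8   (i.e. x & ((i64 -1) << 3))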
define void @avoid_to_combine_shifts_to_and_mask_type2_i64(<2 x i64>* %a, <2 x i64>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.d $w0, $w0, 3
; MIPSEL64R6-NEXT:    slli.d $w0, $w0, 3
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.d $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.d $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.d $w0, $w0, 3
; MIPSEL32R5-NEXT:    slli.d $w0, $w0, 3
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
entry:
  %0 = load <2 x i64>, <2 x i64>* %a
  %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 3)
  %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 3)
  store <2 x i64> %2, <2 x i64>* %b
  ret void
}

; do not fold (shl (srl x, c1), c1) -> (and x, (shl -1, c1))
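; Worked example (illustrative only): with c1 = 5 on each i32 lane,
;   (x >> 5) << 5  ==  x & 0xFFFFFFE0   (i.e. x & ((i32 -1) << 5))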
define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_a(<4 x i32>* %a, <4 x i32>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_a:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.w $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.w $w0, $w0, 5
; MIPSEL64R6-NEXT:    slli.w $w0, $w0, 5
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.w $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_a:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.w $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.w $w0, $w0, 5
; MIPSEL32R5-NEXT:    slli.w $w0, $w0, 5
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.w $w0, 0($5)
entry:
  %0 = load <4 x i32>, <4 x i32>* %a
  %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 5)
  %2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 5)
  store <4 x i32> %2, <4 x i32>* %b
  ret void
}

; do not fold (shl (srl x, c1), c1) -> (and x, (shl -1, c1))
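; Worked example (illustrative only): with c1 = 30 on each i32 lane,
;   (x >> 30) << 30  ==  x & 0xC0000000   (i.e. x & ((i32 -1) << 30))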
define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_b(<4 x i32>* %a, <4 x i32>* %b) {
; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_b:
; MIPSEL64R6:       # %bb.0: # %entry
; MIPSEL64R6-NEXT:    ld.w $w0, 0($4)
; MIPSEL64R6-NEXT:    srli.w $w0, $w0, 30
; MIPSEL64R6-NEXT:    slli.w $w0, $w0, 30
; MIPSEL64R6-NEXT:    jr $ra
; MIPSEL64R6-NEXT:    st.w $w0, 0($5)
;
; MIPSEL32R5-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_b:
; MIPSEL32R5:       # %bb.0: # %entry
; MIPSEL32R5-NEXT:    ld.w $w0, 0($4)
; MIPSEL32R5-NEXT:    srli.w $w0, $w0, 30
; MIPSEL32R5-NEXT:    slli.w $w0, $w0, 30
; MIPSEL32R5-NEXT:    jr $ra
; MIPSEL32R5-NEXT:    st.w $w0, 0($5)
entry:
  %0 = load <4 x i32>, <4 x i32>* %a
  %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 30)
  %2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 30)
  store <4 x i32> %2, <4 x i32>* %b
  ret void
}