; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,X64

; Test with more than four mask-register pairs live across a call, which
; forces the mask values to be spilled.
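;
; VP2INTERSECT writes its two mask results into an even/odd pair of the eight
; mask registers (k0/k1, k2/k3, k4/k5, k6/k7), so only four pairs exist. The
; results of all five intrinsic calls below are kept live across the call to
; @dummy, and mask registers are not preserved across calls, so the live mask
; values must be spilled to the stack and reloaded afterwards.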

define void @test(<16 x i32> %a0, <16 x i32> %b0, <16 x i32> %a1, <16 x i32> %b1, <16 x i32> %a2, <16 x i32> %b2, <16 x i32> %a3, <16 x i32> %b3, <16 x i32> %a4, <16 x i32> %b4, i16* nocapture %m0, i16* nocapture %m1) nounwind {
; X86-LABEL: test:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    movl 456(%ebp), %esi
; X86-NEXT:    vmovaps 328(%ebp), %zmm3
; X86-NEXT:    vmovaps 200(%ebp), %zmm4
; X86-NEXT:    vmovaps 72(%ebp), %zmm5
; X86-NEXT:    vp2intersectd %zmm1, %zmm0, %k0
; X86-NEXT:    kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT:    vp2intersectd 8(%ebp), %zmm2, %k0
; X86-NEXT:    kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT:    vp2intersectd 136(%ebp), %zmm5, %k0
; X86-NEXT:    kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT:    vp2intersectd 264(%ebp), %zmm4, %k0
; X86-NEXT:    kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT:    vp2intersectd 392(%ebp), %zmm3, %k0
; X86-NEXT:    kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT:    kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT:    vzeroupper
; X86-NEXT:    calll dummy
; X86-NEXT:    kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    kmovw %k0, %eax
; X86-NEXT:    kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    kmovw %k0, %ecx
; X86-NEXT:    kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    kmovw %k0, %edx
; X86-NEXT:    kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    kmovw %k0, %edi
; X86-NEXT:    addl %edi, %eax
; X86-NEXT:    kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k2 # 4-byte Folded Reload
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k3
; X86-NEXT:    kmovw %k2, %edi
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    kmovw %k1, %ecx
; X86-NEXT:    addl %edi, %ecx
; X86-NEXT:    addl %eax, %ecx
; X86-NEXT:    addl %edx, %ecx
; X86-NEXT:    movw %cx, (%esi)
; X86-NEXT:    leal -8(%ebp), %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rbp
; X64-NEXT:    movq %rsp, %rbp
; X64-NEXT:    pushq %r14
; X64-NEXT:    pushq %rbx
; X64-NEXT:    andq $-64, %rsp
; X64-NEXT:    subq $64, %rsp
; X64-NEXT:    movq %rdi, %r14
; X64-NEXT:    vmovaps 16(%rbp), %zmm8
; X64-NEXT:    vp2intersectd %zmm1, %zmm0, %k0
; X64-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT:    vp2intersectd %zmm3, %zmm2, %k0
; X64-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT:    vp2intersectd %zmm5, %zmm4, %k0
; X64-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT:    vp2intersectd %zmm7, %zmm6, %k0
; X64-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT:    vp2intersectd 80(%rbp), %zmm8, %k0
; X64-NEXT:    kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT:    kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT:    vzeroupper
; X64-NEXT:    callq dummy
; X64-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT:    kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT:    kmovw %k0, %eax
; X64-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT:    kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT:    kmovw %k0, %ecx
; X64-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT:    kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT:    kmovw %k0, %edx
; X64-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT:    kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT:    kmovw %k0, %esi
; X64-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT:    kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT:    kmovw %k0, %edi
; X64-NEXT:    kmovw %k1, %ebx
; X64-NEXT:    addl %edi, %eax
; X64-NEXT:    addl %ecx, %edx
; X64-NEXT:    leal (%rbx,%rsi), %ecx
; X64-NEXT:    addl %eax, %ecx
; X64-NEXT:    addl %edx, %ecx
; X64-NEXT:    movw %cx, (%r14)
; X64-NEXT:    leaq -16(%rbp), %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %r14
; X64-NEXT:    popq %rbp
; X64-NEXT:    retq
entry:
  %0 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a0, <16 x i32> %b0)
  %1 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a1, <16 x i32> %b1)
  %2 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a2, <16 x i32> %b2)
  %3 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a3, <16 x i32> %b3)
  %4 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a4, <16 x i32> %b4)

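  ; Keep the first half of all five pairs plus the second half of the first
  ; two pairs live: seven mask values in total.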
  %5 = extractvalue { <16 x i1>, <16 x i1> } %0, 0
  %6 = extractvalue { <16 x i1>, <16 x i1> } %1, 0
  %7 = extractvalue { <16 x i1>, <16 x i1> } %2, 0
  %8 = extractvalue { <16 x i1>, <16 x i1> } %3, 0
  %9 = extractvalue { <16 x i1>, <16 x i1> } %4, 0
  %10 = extractvalue { <16 x i1>, <16 x i1> } %0, 1
  %11 = extractvalue { <16 x i1>, <16 x i1> } %1, 1

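  ; The call clobbers every mask register, so the live mask values above must
  ; be spilled around it and reloaded before the adds below.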
  call void @dummy()

  %12 = bitcast <16 x i1> %5 to i16
  %13 = bitcast <16 x i1> %6 to i16
  %14 = bitcast <16 x i1> %7 to i16
  %15 = bitcast <16 x i1> %8 to i16
  %16 = bitcast <16 x i1> %9 to i16
  %17 = bitcast <16 x i1> %10 to i16
  %18 = bitcast <16 x i1> %11 to i16

  %19 = add i16 %12, %13
  %20 = add i16 %14, %15
  %21 = add i16 %16, %17
  %22 = add i16 %19, %21
  %23 = add i16 %22, %20

  store i16 %23, i16* %m0, align 16
  ret void
}

declare { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32>, <16 x i32>)
declare void @dummy()