; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx --show-mc-encoding | FileCheck %s
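; Check that 256-bit !nontemporal vector stores (float, i64, and double
; element types) are lowered to vmovntps/vmovntdq/vmovntpd on SKX, and that
; the EVEX forms are compressed to VEX encodings (verified with
; --show-mc-encoding). The interleaved scalar loads from %loadptr are summed
; into the return value so they stay live between the stores.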

define i32 @f256(<8 x float> %A, <8 x float> %AA, i8* %B, <4 x double> %C, <4 x double> %CC, i32 %D, <4 x i64> %E, <4 x i64> %EE, i32* %loadptr) {
; CHECK-LABEL: f256:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl (%rdx), %eax ## encoding: [0x8b,0x02]
; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT:    vmovntps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x2b,0x07]
; CHECK-NEXT:    vpaddq %ymm5, %ymm4, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xd4,0xc5]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntdq %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x07]
; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntpd %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x2b,0x07]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, %AA
  store <8 x float> %A2, <8 x float>* %cast, align 64, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, %EE
  store <4 x i64> %E2, <4 x i64>* %cast1, align 64, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, %CC
  store <4 x double> %C2, <4 x double>* %cast2, align 64, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  ret i32 %sum3
}

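; Same checks for 128-bit vectors: the xmm non-temporal store forms are used,
; and since no ymm register is touched, no vzeroupper is emitted.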
define i32 @f128(<4 x float> %A, <4 x float> %AA, i8* %B, <2 x double> %C, <2 x double> %CC, i32 %D, <2 x i64> %E, <2 x i64> %EE, i32* %loadptr) {
; CHECK-LABEL: f128:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl (%rdx), %eax ## encoding: [0x8b,0x02]
; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT:    vmovntps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
; CHECK-NEXT:    vpaddq %xmm5, %xmm4, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xd4,0xc5]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntdq %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe7,0x07]
; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    vmovntpd %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2b,0x07]
; CHECK-NEXT:    addl (%rdx), %eax ## encoding: [0x03,0x02]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <4 x float>*
  %A2 = fadd <4 x float> %A, %AA
  store <4 x float> %A2, <4 x float>* %cast, align 64, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <2 x i64>*
  %E2 = add <2 x i64> %E, %EE
  store <2 x i64> %E2, <2 x i64>* %cast1, align 64, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <2 x double>*
  %C2 = fadd <2 x double> %C, %CC
  store <2 x double> %C2, <2 x double>* %cast2, align 64, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  ret i32 %sum3
}
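; The !nontemporal metadata node is a single i32 1; attaching it to a store
; requests a non-temporal (streaming) store.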
!0 = !{i32 1}