; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck %s

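; t0: doubling a loaded x86_mmx value with llvm.x86.mmx.padd.q should select a
; single paddq and return the result in %rax via movq.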
define i64 @t0(x86_mmx* %p) {
; CHECK-LABEL: t0:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddq %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}

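; t1: same pattern with llvm.x86.mmx.padd.d; expect paddd.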
define i64 @t1(x86_mmx* %p) {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddd %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}

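; t2: same pattern with llvm.x86.mmx.padd.w; expect paddw.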
define i64 @t2(x86_mmx* %p) {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddw %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}

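; t3: same pattern with llvm.x86.mmx.padd.b; expect paddb.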
define i64 @t3(x86_mmx* %p) {
; CHECK-LABEL: t3:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq (%rdi), %mm0
; CHECK-NEXT:    paddb %mm0, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t = load x86_mmx, x86_mmx* %p
  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
  %s = bitcast x86_mmx %u to i64
  ret i64 %s
}

@R = external global x86_mmx

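; t4: <1 x i64> arguments are bitcast to x86_mmx, combined with paddus.w,
; stored to the global @R, and followed by emms.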
define void @t4(<1 x i64> %A, <1 x i64> %B) {
; CHECK-LABEL: t4:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movq %rdi, %mm0
; CHECK-NEXT:    movq %rsi, %mm1
; CHECK-NEXT:    paddusw %mm0, %mm1
; CHECK-NEXT:    movq _R@{{.*}}(%rip), %rax
; CHECK-NEXT:    movq %mm1, (%rax)
; CHECK-NEXT:    emms
; CHECK-NEXT:    retq
entry:
  %tmp2 = bitcast <1 x i64> %A to x86_mmx
  %tmp3 = bitcast <1 x i64> %B to x86_mmx
  %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp2, x86_mmx %tmp3)
  store x86_mmx %tmp7, x86_mmx* @R
  tail call void @llvm.x86.mmx.emms()
  ret void
}

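; t5: building a <2 x i32> from two scalars and bitcasting it to i64 should be
; done with SSE2 movd/punpckldq rather than MMX instructions.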
define i64 @t5(i32 %a, i32 %b) nounwind readnone {
; CHECK-LABEL: t5:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movd %esi, %xmm0
; CHECK-NEXT:    movd %edi, %xmm1
; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    movq %xmm1, %rax
; CHECK-NEXT:    retq
  %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
  %conv = bitcast <2 x i32> %v1 to i64
  ret i64 %conv
}

declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)

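; t6: an immediate shift through llvm.x86.mmx.pslli.q should select
; psllq $48 on the MMX register.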
define <1 x i64> @t6(i64 %t) {
; CHECK-LABEL: t6:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq %rdi, %mm0
; CHECK-NEXT:    psllq $48, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    retq
  %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
  %t0 = bitcast <1 x i64> %t1 to x86_mmx
  %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
  %t3 = bitcast x86_mmx %t2 to <1 x i64>
  ret <1 x i64> %t3
}

declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()