; RUN: llc -verify-machineinstrs -mcpu=pwr9 -enable-ppc-quad-precision \
; RUN:   -mtriple=powerpc64le-unknown-unknown -ppc-vsr-nums-as-vr \
; RUN:   -ppc-asm-full-reg-names < %s | FileCheck %s
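
; Test that the PowerPC quad-precision round-to-odd intrinsics
; (@llvm.ppc.*f128.round.to.odd) and the scalar exponent insert/extract
; intrinsics are lowered to the corresponding POWER9 instructions.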

@A = common global fp128 0xL00000000000000000000000000000000, align 16
@B = common global fp128 0xL00000000000000000000000000000000, align 16
@C = common global fp128 0xL00000000000000000000000000000000, align 16
@D = common global fp128 0xL00000000000000000000000000000000, align 16

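; The square-root round-to-odd intrinsic should select xssqrtqpo.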
define fp128 @testSqrtOdd(fp128 %a) {
entry:
  %0 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %a)
  ret fp128 %0
; CHECK-LABEL: testSqrtOdd
; CHECK: xssqrtqpo v2, v2
; CHECK: blr
}

declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)

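; The four FMA round-to-odd forms (xsmaddqpo, xsmsubqpo, xsnmaddqpo,
; xsnmsubqpo) are selected by negating the addend, the result, or both.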
define void @testFMAOdd(fp128 %a, fp128 %b, fp128 %c) {
entry:
  %0 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
  store fp128 %0, fp128* @A, align 16
  %sub = fsub fp128 0xL00000000000000008000000000000000, %c
  %1 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub)
  store fp128 %1, fp128* @B, align 16
  %2 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
  %sub1 = fsub fp128 0xL00000000000000008000000000000000, %2
  store fp128 %sub1, fp128* @C, align 16
  %sub2 = fsub fp128 0xL00000000000000008000000000000000, %c
  %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub2)
  %sub3 = fsub fp128 0xL00000000000000008000000000000000, %3
  store fp128 %sub3, fp128* @D, align 16
  ret void
; CHECK-LABEL: testFMAOdd
; CHECK-DAG: xsmaddqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsmsubqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsnmaddqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsnmsubqpo v{{[0-9]+}}, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)

define fp128 @testAddOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.addf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testAddOdd
; CHECK: xsaddqpo v2, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.addf128.round.to.odd(fp128, fp128)

define fp128 @testSubOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.subf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testSubOdd
; CHECK: xssubqpo v2, v2, v3
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.subf128.round.to.odd(fp128, fp128)

; Function Attrs: noinline nounwind optnone
define fp128 @testMulOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.mulf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testMulOdd
; CHECK: xsmulqpo v2, v2, v3
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.mulf128.round.to.odd(fp128, fp128)

define fp128 @testDivOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.divf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testDivOdd
; CHECK: xsdivqpo v2, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.divf128.round.to.odd(fp128, fp128)

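; xscvqpdpo truncates the fp128 value to double with round-to-odd; the
; result is then copied into f1, the return register for double.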
define double @testTruncOdd(fp128 %a) {
entry:
  %0 = call double @llvm.ppc.truncf128.round.to.odd(fp128 %a)
  ret double %0
; CHECK-LABEL: testTruncOdd
; CHECK: xscvqpdpo v2, v2
; CHECK: xscpsgndp f1, v2, v2
; CHECK: blr
}

declare double @llvm.ppc.truncf128.round.to.odd(fp128)

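; xsiexpqp builds the fp128 result from the significand of the value
; loaded from @A and the exponent passed in r3 (moved to an FPR by mtfprd).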
; Function Attrs: noinline nounwind optnone
define fp128 @insert_exp_qp(i64 %b) {
entry:
  %b.addr = alloca i64, align 8
  store i64 %b, i64* %b.addr, align 8
  %0 = load fp128, fp128* @A, align 16
  %1 = load i64, i64* %b.addr, align 8
  %2 = call fp128 @llvm.ppc.scalar.insert.exp.qp(fp128 %0, i64 %1)
  ret fp128 %2
; CHECK-LABEL: insert_exp_qp
; CHECK-DAG: mtfprd [[FPREG:f[0-9]+]], r3
; CHECK-DAG: lxvx [[VECREG:v[0-9]+]]
; CHECK: xsiexpqp v2, [[VECREG]], [[FPREG]]
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.scalar.insert.exp.qp(fp128, i64)

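; xsxexpqp extracts the biased exponent of the fp128 value into a vector
; register, and mfvsrd moves it to r3 for the i64 return.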
; Function Attrs: noinline nounwind optnone
define i64 @extract_exp() {
entry:
  %0 = load fp128, fp128* @A, align 16
  %1 = call i64 @llvm.ppc.scalar.extract.expq(fp128 %0)
  ret i64 %1
; CHECK-LABEL: extract_exp
; CHECK: lxvx [[VECIN:v[0-9]+]]
; CHECK: xsxexpqp [[VECOUT:v[0-9]+]], [[VECIN]]
; CHECK: mfvsrd r3, [[VECOUT]]
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare i64 @llvm.ppc.scalar.extract.expq(fp128)