; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR

; Verify that the NVPTXLowerAggrCopies pass works as expected: calls to the
; llvm.mem* intrinsics are lowered to explicit load/store loops.
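; The IR prefix checks the IR emitted by the lowering pass itself; the PTX
; prefix checks the machine code llc generates for sm_35 at -O0.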

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "nvptx64-unknown-unknown"

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1

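; A memcpy whose size is only known at run time is expanded into a byte-wise
; copy loop guarded by an initial "%n != 0" check.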
define i8* @memcpy_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
  ret i8* %dst

; IR-LABEL:   @memcpy_caller
; IR:         entry:
; IR:         [[Cond:%[0-9]+]] = icmp ne i64 %n, 0
; IR:         br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

; IR:         loop-memcpy-expansion:
; IR:         %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
; IR:         [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
; IR:         [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
; IR:         [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
; IR:         store i8 [[Load]], i8* [[DstGep]]
; IR:         [[IndexInc]] = add i64 %loop-index, 1
; IR:         [[Cond2:%[0-9]+]] = icmp ult i64 [[IndexInc]], %n
; IR:         br i1 [[Cond2]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

; IR-LABEL:   post-loop-memcpy-expansion:
; IR:         ret i8* %dst

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memcpy_caller
; PTX:        LBB[[LABEL:[_0-9]+]]:
; PTX:        ld.u8 %rs[[REG:[0-9]+]]
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX:        add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX:        setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd{{[0-9]+}}
; PTX:        @%p[[PRED]] bra LBB[[LABEL]]

}

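; A volatile memcpy must expand to a loop of volatile loads and stores.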
define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 true)
  ret i8* %dst

; IR-LABEL:   @memcpy_volatile_caller
; IR:         entry:
; IR:         [[Cond:%[0-9]+]] = icmp ne i64 %n, 0
; IR:         br i1 [[Cond]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

; IR:         loop-memcpy-expansion:
; IR:         %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %loop-memcpy-expansion ]
; IR:         [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
; IR:         [[Load:%[0-9]+]] = load volatile i8, i8* [[SrcGep]]
; IR:         [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
; IR:         store volatile i8 [[Load]], i8* [[DstGep]]
; IR:         [[IndexInc]] = add i64 %loop-index, 1
; IR:         [[Cond2:%[0-9]+]] = icmp ult i64 [[IndexInc]], %n
; IR:         br i1 [[Cond2]], label %loop-memcpy-expansion, label %post-loop-memcpy-expansion

; IR-LABEL:   post-loop-memcpy-expansion:
; IR:         ret i8* %dst

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memcpy_volatile_caller
; PTX:        LBB[[LABEL:[_0-9]+]]:
; PTX:        ld.volatile.u8 %rs[[REG:[0-9]+]]
; PTX:        st.volatile.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX:        add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX:        setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd{{[0-9]+}}
; PTX:        @%p[[PRED]] bra LBB[[LABEL]]
}

define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 {
entry:
  %0 = bitcast i32* %dst to i8*
  %1 = bitcast i32* %src to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %n, i1 false)
  ret i8* %0

; Check that casts in calls to memcpy are handled properly
; IR-LABEL:   @memcpy_casting_caller
; IR:         [[DSTCAST:%[0-9]+]] = bitcast i32* %dst to i8*
; IR:         [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
; IR:         getelementptr inbounds i8, i8* [[SRCCAST]]
; IR:         getelementptr inbounds i8, i8* [[DSTCAST]]
}

define i8* @memcpy_known_size(i8* %dst, i8* %src) {
entry:
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 144, i1 false)
  ret i8* %dst

; Check that calls with compile-time constant size are handled correctly
; IR-LABEL:    @memcpy_known_size
; IR:          entry:
; IR:          br label %load-store-loop
; IR:          load-store-loop:
; IR:          %loop-index = phi i64 [ 0, %entry ], [ [[IndexInc:%[0-9]+]], %load-store-loop ]
; IR:          [[SrcGep:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64 %loop-index
; IR:          [[Load:%[0-9]+]] = load i8, i8* [[SrcGep]]
; IR:          [[DstGep:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64 %loop-index
; IR:          store i8 [[Load]], i8* [[DstGep]]
; IR:          [[IndexInc]] = add i64 %loop-index, 1
; IR:          [[Cond:%[0-9]+]] = icmp ult i64 [[IndexInc]], 144
; IR:          br i1 [[Cond]], label %load-store-loop, label %memcpy-split
}

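; memset is expanded into a store loop; the i32 value operand is truncated
; to i8 once, before the loop.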
define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
entry:
  %0 = trunc i32 %c to i8
  tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i1 false)
  ret i8* %dst

; IR-LABEL:   @memset_caller
; IR:         [[VAL:%[0-9]+]] = trunc i32 %c to i8
; IR:         [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
; IR:         br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR:         loadstoreloop:
; IR:         [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT:    store i8 [[VAL]], i8* [[STOREPTR]]

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memset_caller(
; PTX:        ld.param.u32 %r[[C:[0-9]+]]
; PTX:        cvt.u16.u32  %rs[[REG:[0-9]+]], %r[[C]];
; PTX:        LBB[[LABEL:[_0-9]+]]:
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
; PTX:        add.s64 %rd[[COUNTER:[0-9]+]], %rd{{[0-9]+}}, 1
; PTX:        setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd{{[0-9]+}}
; PTX:        @%p[[PRED]] bra LBB[[LABEL]]
}

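; As above, a volatile memset must expand to volatile stores.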
define i8* @volatile_memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
entry:
  %0 = trunc i32 %c to i8
  tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i1 true)
  ret i8* %dst

; IR-LABEL:   @volatile_memset_caller
; IR:         [[VAL:%[0-9]+]] = trunc i32 %c to i8
; IR:         loadstoreloop:
; IR:         [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT:    store volatile i8 [[VAL]], i8* [[STOREPTR]]
}

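; memmove must compare the source and destination pointers and copy either
; backwards or forwards, so that overlapping regions are copied correctly.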
define i8* @memmove_caller(i8* %dst, i8* %src, i64 %n) #0 {
entry:
  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
  ret i8* %dst

; IR-LABEL:   @memmove_caller
; IR:         icmp ult i8* %src, %dst
; IR:         [[PHIVAL:%[0-9a-zA-Z_]+]] = phi i64
; IR-NEXT:    %index_ptr = sub i64 [[PHIVAL]], 1
; IR:         [[FWDPHIVAL:%[0-9a-zA-Z_]+]] = phi i64
; IR:         {{%[0-9a-zA-Z_]+}} = add i64 [[FWDPHIVAL]], 1

; PTX-LABEL:  .visible .func (.param .b64 func_retval0) memmove_caller(
; PTX:        ld.param.u64 %rd[[N:[0-9]+]]
; PTX-DAG:    setp.eq.s64 %p[[NEQ0:[0-9]+]], %rd[[N]], 0
; PTX-DAG:    setp.ge.u64 %p[[SRC_GE_DST:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
; PTX-NEXT:   @%p[[SRC_GE_DST]] bra LBB[[FORWARD_BB:[0-9_]+]]
; -- this is the backwards copying BB
; PTX:        @%p[[NEQ0]] bra LBB[[EXIT:[0-9_]+]]
; PTX:        add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, -1
; PTX:        ld.u8 %rs[[ELEMENT:[0-9]+]]
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT]]
; -- this is the forwards copying BB
; PTX:        LBB[[FORWARD_BB]]:
; PTX:        @%p[[NEQ0]] bra LBB[[EXIT]]
; PTX:        ld.u8 %rs[[ELEMENT2:[0-9]+]]
; PTX:        st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT2]]
; PTX:        add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, 1
; -- exit block
; PTX:        LBB[[EXIT]]:
; PTX-NEXT:   st.param.b64 [func_retval0
; PTX-NEXT:   ret
}