reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced

References

lib/Target/AMDGPU/SIInsertWaitcnts.cpp
 1203   if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
 1203   if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
 1204     if (TII->isAlwaysGDS(Inst.getOpcode()) ||
 1205         TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
 1206       ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
 1207       ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
 1209       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
 1211   } else if (TII->isFLAT(Inst)) {
 1212     assert(Inst.mayLoad() || Inst.mayStore());
 1212     assert(Inst.mayLoad() || Inst.mayStore());
 1214     if (TII->usesVM_CNT(Inst)) {
 1216         ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
 1217       else if (Inst.mayLoad() &&
 1218                AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
 1219         ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
 1221         ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
 1224     if (TII->usesLGKM_CNT(Inst)) {
 1225       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
 1230       if (mayAccessLDSThroughFlat(Inst))
 1233   } else if (SIInstrInfo::isVMEM(Inst) &&
 1235              Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
 1236              Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
 1237              Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
 1238              Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
 1239              Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
 1241       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
 1242     else if ((Inst.mayLoad() &&
 1243               AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
 1245              (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
 1245              (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
 1245              (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
 1246       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
 1247     else if (Inst.mayStore())
 1248       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
 1251         (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
 1251         (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
 1252       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
 1254   } else if (TII->isSMRD(Inst)) {
 1255     ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
 1256   } else if (Inst.isCall()) {
 1257     if (callWaitsOnFunctionReturn(Inst)) {
 1265     switch (Inst.getOpcode()) {
 1268       ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
 1272       int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
 1274         ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
 1276         ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
 1278         ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
 1283       ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);