reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced

References

include/llvm/CodeGen/MachinePipeliner.h
  202         RegClassInfo(rci), II_setByPragma(II), Topo(SUnits, &ExitSU) {
include/llvm/CodeGen/ScheduleDAG.h
  678       return nodes_iterator(G->SUnits.begin());
  681       return nodes_iterator(G->SUnits.end());
include/llvm/CodeGen/ScheduleDAGInstrs.h
  386     const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
  386     const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
  388     SUnits.emplace_back(MI, (unsigned)SUnits.size());
  388     SUnits.emplace_back(MI, (unsigned)SUnits.size());
  389     assert((Addr == nullptr || Addr == &SUnits[0]) &&
  391     return &SUnits.back();
lib/CodeGen/DFAPacketizer.cpp
  246   for (SUnit &SU : VLIWScheduler->SUnits)
lib/CodeGen/MachinePipeliner.cpp
  664   for (auto &SU : SUnits) {
  760   for (SUnit &I : SUnits) {
  843   for (SUnit &I : SUnits) {
 1255   swapAntiDependences(SUnits);
 1257   Circuits Cir(SUnits, Topo);
 1260   for (int i = 0, e = SUnits.size(); i != e; ++i) {
 1266   swapAntiDependences(SUnits);
 1288   for (SUnit &SU : DAG->SUnits) {
 1364   ScheduleInfo.resize(SUnits.size());
 1370       const SUnit &SU = SUnits[*I];
 1382     SUnit *SU = &SUnits[*I];
 1406     SUnit *SU = &SUnits[*I];
 1429     for (unsigned i = 0; i < SUnits.size(); i++) {
 1431       dbgs() << "\t   ASAP = " << getASAP(&SUnits[i]) << "\n";
 1432       dbgs() << "\t   ALAP = " << getALAP(&SUnits[i]) << "\n";
 1433       dbgs() << "\t   MOV  = " << getMOV(&SUnits[i]) << "\n";
 1434       dbgs() << "\t   D    = " << getDepth(&SUnits[i]) << "\n";
 1435       dbgs() << "\t   H    = " << getHeight(&SUnits[i]) << "\n";
 1436       dbgs() << "\t   ZLD  = " << getZeroLatencyDepth(&SUnits[i]) << "\n";
 1437       dbgs() << "\t   ZLH  = " << getZeroLatencyHeight(&SUnits[i]) << "\n";
 1726   for (unsigned i = 0; i < SUnits.size(); ++i) {
 1727     SUnit *SU = &SUnits[i];
 2643   for (int i = 0, e = SSD->SUnits.size(); i < e; ++i) {
 2644     SUnit &SU = SSD->SUnits[i];
 2830   for (int i = 0, e = SSD->SUnits.size(); i != e; ++i) {
 2831     SUnit *SU = &SSD->SUnits[i];
lib/CodeGen/MachineScheduler.cpp
  828   for (SUnit &SU : SUnits) {
  995   for (SUnit &SU : SUnits)
 1167   for (const SUnit &SU : SUnits) {
 1284   DFSResult->resize(SUnits.size());
 1285   DFSResult->compute(SUnits);
 1602   for (SUnit &SU : DAG->SUnits) {
 1607     unsigned ChainPredID = DAG->SUnits.size();
 1823   for (SUnit &SU : DAG->SUnits) {
 1892   for (SUnit &SU : DAG->SUnits) {
lib/CodeGen/MacroFusion.cpp
  102       for (SUnit &SU : DAG.SUnits) {
  135     for (SUnit &ISU : DAG->SUnits)
lib/CodeGen/PostRASchedulerList.cpp
  396       AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
  418   AvailableQueue.initNodes(SUnits);
  535   for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
  537     if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
  537     if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
  538       AvailableQueue.push(&SUnits[i]);
  539       SUnits[i].isAvailable = true;
  550   Sequence.reserve(SUnits.size());
lib/CodeGen/ScheduleDAG.cpp
   65   SUnits.clear();
  393   for (const SUnit &SUnit : SUnits) {
  434   return SUnits.size() - DeadNodes;
lib/CodeGen/ScheduleDAGInstrs.cpp
  116                              Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) {
  563   SUnits.reserve(NumRegionInstrs);
  741     PDiffs->init(SUnits.size());
 1060   SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
 1166   for (const SUnit &SU : SUnits)
lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
  270       LoadSU = &SUnits[LoadNode->getNodeId()];
  531   if (!SUnits.empty()) {
  532     SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
  542   Sequence.reserve(SUnits.size());
lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
  194       Topo(SUnits, nullptr) {
  278     unsigned NumSUnits = SUnits.size();
  288     unsigned NumSUnits = SUnits.size();
  376   AvailableQueue->initNodes(SUnits);
  587         SUnit *Def = &SUnits[N->getNodeId()];
 1005     LoadSU = &SUnits[LoadNode->getNodeId()];
 1023     NewSU = &SUnits[N->getNodeId()];
 1602   if (!SUnits.empty()) {
 1603     SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
 1611   Sequence.reserve(SUnits.size());
lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
   71   if (!SUnits.empty())
   72     Addr = &SUnits[0];
   74   SUnits.emplace_back(N, (unsigned)SUnits.size());
   74   SUnits.emplace_back(N, (unsigned)SUnits.size());
   75   assert((Addr == nullptr || Addr == &SUnits[0]) &&
   77   SUnits.back().OrigNode = &SUnits.back();
   77   SUnits.back().OrigNode = &SUnits.back();
   78   SUnit *SU = &SUnits.back();
  337   SUnits.reserve(NumNodes * 2);
  430       SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
  443   for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
  444     SUnit *SU = &SUnits[su];
  475         SUnit *OpSU = &SUnits[OpN->getNodeId()];
  697   for (const SUnit &SU : SUnits)
lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
  100   AvailableQueue->initNodes(SUnits);
  173   for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
  175     if (SUnits[i].Preds.empty()) {
  176       AvailableQueue->push(&SUnits[i]);
  177       SUnits[i].isAvailable = true;
  184   Sequence.reserve(SUnits.size());
lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
  301       GW.emitEdge(nullptr, -1, &SUnits[N->getNodeId()], -1,
lib/Target/AMDGPU/AMDGPUSubtarget.cpp
  728     for (SUnit &SU : DAG->SUnits) {
  849     if (!TSchedModel || DAG->SUnits.empty())
  856     auto LastSALU = DAG->SUnits.begin();
  857     auto E = DAG->SUnits.end();
  859     for (SUnit &SU : DAG->SUnits) {
lib/Target/AMDGPU/GCNILPSched.cpp
  293   auto &SUnits = const_cast<ScheduleDAG&>(DAG).SUnits;
lib/Target/AMDGPU/GCNIterativeScheduler.cpp
  218     Sch.scheduleRegion(Rgn, Sch.SUnits, SaveMaxRP);
lib/Target/AMDGPU/GCNMinRegStrategy.cpp
   66   void initNumPreds(const decltype(ScheduleDAG::SUnits) &SUnits);
   86 void GCNMinRegScheduler::initNumPreds(const decltype(ScheduleDAG::SUnits) &SUnits) {
  234   const auto &SUnits = DAG.SUnits;
lib/Target/AMDGPU/R600MachineScheduler.cpp
  132     for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
  133       const SUnit &S = DAG->SUnits[i];
lib/Target/AMDGPU/SIMachineScheduler.cpp
  487     if (SuccSU->NodeNum >= DAG->SUnits.size())
  653   if (SU->NodeNum >= DAG->SUnits.size())
  659   unsigned DAGSize = DAG->SUnits.size();
  662     SUnit *SU = &DAG->SUnits[i];
  680   unsigned DAGSize = DAG->SUnits.size();
  688     SUnit *SU = &DAG->SUnits[i];
  704     const SUnit &SU = DAG->SUnits[SUNum];
  727         SubGraph = DAG->GetTopo()->GetSubGraph(SU, DAG->SUnits[j],
  731         SubGraph = DAG->GetTopo()->GetSubGraph(DAG->SUnits[j], SU,
  755             if (hasDataDependencyPred(DAG->SUnits[k], DAG->SUnits[j])) {
  755             if (hasDataDependencyPred(DAG->SUnits[k], DAG->SUnits[j])) {
  763           if (hasDataDependencyPred(SU, DAG->SUnits[j])) {
  804   unsigned DAGSize = DAG->SUnits.size();
  817     SUnit *SU = &DAG->SUnits[SUNum];
  859     SUnit *SU = &DAG->SUnits[SUNum];
  898   unsigned DAGSize = DAG->SUnits.size();
  905     SUnit *SU = &DAG->SUnits[i];
  927   unsigned DAGSize = DAG->SUnits.size();
  942     SUnit *SU = &DAG->SUnits[SUNum];
  976   unsigned DAGSize = DAG->SUnits.size();
  986     SUnit *SU = &DAG->SUnits[i];
 1009   unsigned DAGSize = DAG->SUnits.size();
 1012     SUnit *SU = &DAG->SUnits[SUNum];
 1035   unsigned DAGSize = DAG->SUnits.size();
 1038     SUnit *SU = &DAG->SUnits[SUNum];
 1056   unsigned DAGSize = DAG->SUnits.size();
 1059     SUnit *SU = &DAG->SUnits[SUNum];
 1077   unsigned DAGSize = DAG->SUnits.size();
 1081     SUnit *SU = &DAG->SUnits[SUNum];
 1087     SUnit *SU = &DAG->SUnits[SUNum];
 1116   unsigned DAGSize = DAG->SUnits.size();
 1120     SUnit *SU = &DAG->SUnits[SUNum];
 1152     const SUnit &SU = DAG->SUnits[SUNum];
 1165         SubGraph = DAG->GetTopo()->GetSubGraph(SU, DAG->SUnits[j],
 1169         SubGraph = DAG->GetTopo()->GetSubGraph(DAG->SUnits[j], SU,
 1177           if (!SIInstrInfo::isEXP(*DAG->SUnits[k].getInstr()))
 1194   unsigned DAGSize = DAG->SUnits.size();
 1227     SUnit *SU = &DAG->SUnits[i];
 1241     SUnit *SU = &DAG->SUnits[i];
 1353   PosOld.reserve(DAG->SUnits.size());
 1354   PosNew.reserve(DAG->SUnits.size());
 1831    unsigned DAGSize = SUnits.size();
 1836     SUnit *SU = &SUnits[ScheduledSUnits[i]];
 1898   for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
 1899     SUnits[i].isScheduled = false;
 1900     SUnits[i].WeakPredsLeft = SUnitsLinksBackup[i].WeakPredsLeft;
 1901     SUnits[i].NumPredsLeft = SUnitsLinksBackup[i].NumPredsLeft;
 1902     SUnits[i].WeakSuccsLeft = SUnitsLinksBackup[i].WeakSuccsLeft;
 1903     SUnits[i].NumSuccsLeft = SUnitsLinksBackup[i].NumSuccsLeft;
 1948   SUnitsLinksBackup = SUnits;
 1953   IsLowLatencySU.resize(SUnits.size(), 0);
 1954   LowLatencyOffset.resize(SUnits.size(), 0);
 1955   IsHighLatencySU.resize(SUnits.size(), 0);
 1957   for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
 1958     SUnit *SU = &SUnits[i];
 2018   ScheduledSUnitsInv.resize(SUnits.size());
 2020   for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
 2033     SUnit *SU = &SUnits[*I];
lib/Target/Hexagon/HexagonMachineScheduler.cpp
  211              for (unsigned su = 0, e = SUnits.size(); su != e;
  212                   ++su) if (SUnits[su].getHeight() > maxH) maxH =
  213                  SUnits[su].getHeight();
  216              for (unsigned su = 0, e = SUnits.size(); su != e;
  217                   ++su) if (SUnits[su].getDepth() > maxD) maxD =
  218                  SUnits[su].getDepth();
lib/Target/Hexagon/HexagonMachineScheduler.h
  185         for (auto &SU : DAG->SUnits)
lib/Target/Hexagon/HexagonSubtarget.cpp
  129   for (SUnit &SU : DAG->SUnits) {
  142   for (SUnit &SU : DAG->SUnits) {
  205   for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {
  207     if (DAG->SUnits[su].getInstr()->isCall())
  208       LastSequentialCall = &DAG->SUnits[su];
  210     else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)
  211       DAG->addEdge(&DAG->SUnits[su], SDep(LastSequentialCall, SDep::Barrier));
  214              shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
  214              shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
  215       DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));
  215       DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));
  231       const MachineInstr *MI = DAG->SUnits[su].getInstr();
  245             LastVRegUse[VRegHoldingReg[MO.getReg()]] = &DAG->SUnits[su];
  250                   LastVRegUse[*AI] != &DAG->SUnits[su])
  252                 DAG->addEdge(&DAG->SUnits[su], SDep(LastVRegUse[*AI], SDep::Barrier));
  271   for (unsigned i = 0, e = DAG->SUnits.size(); i != e; ++i) {
  272     SUnit &S0 = DAG->SUnits[i];
  285       SUnit &S1 = DAG->SUnits[j];