Uses of MI in SIRegisterInfo::eliminateFrameIndex (LLVM AMDGPU backend, SIRegisterInfo.cpp). The listing is non-contiguous; original source line numbers are kept, and lines that continue past the shown fragment are left truncated as in the source view:

 986   MachineFunction *MF = MI->getParent()->getParent();
 987   MachineBasicBlock *MBB = MI->getParent();
 991   DebugLoc DL = MI->getDebugLoc();
 995   MachineOperand &FIOp = MI->getOperand(FIOperandNum);
 996   int Index = MI->getOperand(FIOperandNum).getIndex();
1000   switch (MI->getOpcode()) {
1010       spillSGPR(MI, Index, RS);
1023       restoreSGPR(MI, Index, RS);
1041       const MachineOperand *VData = TII->getNamedOperand(*MI,
1043       assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1046       buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
1049             TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1051             TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1052             *MI->memoperands_begin(),
1054       MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1055       MI->eraseFromParent();
1071       const MachineOperand *VData = TII->getNamedOperand(*MI,
1073       assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1076       buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
1079             TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1081             TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1082             *MI->memoperands_begin(),
1084       MI->eraseFromParent();
1089       const DebugLoc &DL = MI->getDebugLoc();
1090       bool IsMUBUF = TII->isMUBUF(*MI);
1100           RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1105         bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1107           MI->getOperand(0).getReg() :
1108           RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1110         BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), DiffReg)
1117           BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1121           if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1161             RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1164             BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1167             BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
1170             BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1175             BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
1178             BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1187           BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), FrameReg)
1194         MI->eraseFromParent();
1203         AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1206         assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1209         TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->setReg(FrameReg);
1213           = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
1217         buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
1218         MI->eraseFromParent();
1228       if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
1229         Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1230         BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
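For context: this function is the AMDGPU override of TargetRegisterInfo::eliminateFrameIndex, the hook the PrologEpilogInserter pass calls for every instruction still carrying an abstract frame index after frame layout; the override must rewrite that operand into a concrete base register plus offset. Below is a minimal sketch of that contract for a hypothetical generic target, not AMDGPU's implementation: the helper name rewriteFrameIndex, the 16-bit immediate width, and the assumption that the immediate operand sits directly after the frame-index operand are all illustrative, while the LLVM types and calls (MachineFrameInfo::getObjectOffset, MachineOperand::ChangeToRegister/ChangeToImmediate, TargetRegisterInfo::getFrameRegister) are real.

    #include <cassert>

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineFrameInfo.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"
    #include "llvm/Support/ErrorHandling.h"
    #include "llvm/Support/MathExtras.h"

    using namespace llvm;

    // Sketch of the eliminateFrameIndex contract for a hypothetical target
    // whose memory instructions encode "reg, base, imm16". AMDGPU's real
    // version above additionally expands spill pseudos and scales byte
    // offsets by the wave size.
    static void rewriteFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
                                  unsigned FIOperandNum,
                                  const TargetRegisterInfo &TRI) {
      assert(SPAdj == 0 && "unhandled SP adjustment in call sequence");

      MachineInstr &Inst = *MI;
      MachineFunction &MF = *Inst.getParent()->getParent();
      const MachineFrameInfo &MFI = MF.getFrameInfo();

      // The abstract stack slot this operand refers to.
      int Index = Inst.getOperand(FIOperandNum).getIndex();

      // Concrete address = frame register + the slot's statically assigned
      // offset + whatever immediate the instruction already carried.
      Register BaseReg = TRI.getFrameRegister(MF);
      int64_t Offset = MFI.getObjectOffset(Index) +
                       Inst.getOperand(FIOperandNum + 1).getImm();

      if (isInt<16>(Offset)) {
        // Fits the (assumed) 16-bit immediate field: rewrite in place.
        Inst.getOperand(FIOperandNum).ChangeToRegister(BaseReg, /*isDef=*/false);
        Inst.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
        return;
      }

      // Too large for the immediate field. A real target would scavenge a
      // temporary (cf. RS->scavengeRegister in the listing above) and emit
      // an explicit add to materialize BaseReg + Offset.
      report_fatal_error("frame offset does not fit in immediate field");
    }

The AMDGPU code above follows the same overall shape, with two extra wrinkles visible in the listing: SGPR/VGPR spill pseudo-instructions are expanded via spillSGPR/restoreSGPR and buildSpillLoadStore, and for MUBUF memory accesses the computed offset is folded into the soffset/offset operands, with V_MOV_B32/S_ADD_U32 sequences built through BuildMI only when the immediate cannot be encoded directly.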